[RFC,XEN,04/23] riscv: header files

Message ID: ba35fbe225eba9edc575e7efc7c6a89633b6e5b7.1579615303.git.bobbyeshleman@gmail.com
State: New, archived
Series: xen: beginning support for RISC-V

Commit Message

Bobby Eshleman Jan. 22, 2020, 1:58 a.m. UTC
This patch adds all of the RISC-V architecture header files.

The address spaces defined in config.h are somewhat modeled after ARM64,
but because RISC-V SV39 has a smaller virtual address width, the layout
necessarily differs. My choices here were fairly arbitrary and I'd like
some feedback on them.
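
For reference, SV39 translates a 39-bit virtual address through three
9-bit VPN fields plus a 12-bit page offset, which is where the
VPN{2,1,0}_BITS constants in config.h come from. A rough sketch of the
split (illustrative only, not part of the patch):

    /* Illustrative: the SV39 virtual-address split assumed by config.h. */
    #define SV39_VA_BITS     39                       /* 9 + 9 + 9 + 12 */
    #define SV39_VPN2(va)    (((va) >> 30) & 0x1ff)   /* 1GB per L2 entry */
    #define SV39_VPN1(va)    (((va) >> 21) & 0x1ff)   /* 2MB per L1 entry */
    #define SV39_VPN0(va)    (((va) >> 12) & 0x1ff)   /* 4KB per L0 entry */
    #define SV39_OFFSET(va)  ((va) & 0xfff)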

Co-authored-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Bobby Eshleman <bobbyeshleman@gmail.com>
---
 xen/include/asm-riscv/altp2m.h           |  39 ++
 xen/include/asm-riscv/asm.h              |  76 +++
 xen/include/asm-riscv/atomic.h           | 249 +++++++++
 xen/include/asm-riscv/bitops.h           | 331 +++++++++++
 xen/include/asm-riscv/bug.h              |  59 ++
 xen/include/asm-riscv/byteorder.h        |  16 +
 xen/include/asm-riscv/cache.h            |  24 +
 xen/include/asm-riscv/cmpxchg.h          | 382 +++++++++++++
 xen/include/asm-riscv/config.h           | 203 +++++++
 xen/include/asm-riscv/csr.h              | 117 ++++
 xen/include/asm-riscv/current.h          |  50 ++
 xen/include/asm-riscv/debugger.h         |  15 +
 xen/include/asm-riscv/delay.h            |  28 +
 xen/include/asm-riscv/desc.h             |  12 +
 xen/include/asm-riscv/device.h           |  15 +
 xen/include/asm-riscv/div64.h            |  23 +
 xen/include/asm-riscv/domain.h           |  85 +++
 xen/include/asm-riscv/event.h            |  42 ++
 xen/include/asm-riscv/fence.h            |  12 +
 xen/include/asm-riscv/flushtlb.h         |  56 ++
 xen/include/asm-riscv/grant_table.h      |  93 ++++
 xen/include/asm-riscv/guest_access.h     | 164 ++++++
 xen/include/asm-riscv/guest_atomics.h    |  62 +++
 xen/include/asm-riscv/hardirq.h          |  27 +
 xen/include/asm-riscv/hypercall.h        |  12 +
 xen/include/asm-riscv/init.h             |  42 ++
 xen/include/asm-riscv/io.h               | 283 ++++++++++
 xen/include/asm-riscv/iocap.h            |  16 +
 xen/include/asm-riscv/iommu.h            |  49 ++
 xen/include/asm-riscv/irq.h              |  58 ++
 xen/include/asm-riscv/mem_access.h       |  35 ++
 xen/include/asm-riscv/mm.h               | 308 ++++++++++
 xen/include/asm-riscv/monitor.h          |  65 +++
 xen/include/asm-riscv/nospec.h           |  25 +
 xen/include/asm-riscv/numa.h             |  41 ++
 xen/include/asm-riscv/p2m.h              | 410 ++++++++++++++
 xen/include/asm-riscv/page.h             | 327 +++++++++++
 xen/include/asm-riscv/paging.h           |  16 +
 xen/include/asm-riscv/pci.h              |  31 ++
 xen/include/asm-riscv/percpu.h           |  34 ++
 xen/include/asm-riscv/pgtable-bits.h     |  53 ++
 xen/include/asm-riscv/processor.h        |  60 ++
 xen/include/asm-riscv/random.h           |   9 +
 xen/include/asm-riscv/regs.h             |  42 ++
 xen/include/asm-riscv/riscv_encoding.h   | 682 +++++++++++++++++++++++
 xen/include/asm-riscv/setup.h            |  16 +
 xen/include/asm-riscv/smp.h              |  50 ++
 xen/include/asm-riscv/softirq.h          |  16 +
 xen/include/asm-riscv/spinlock.h         |  13 +
 xen/include/asm-riscv/string.h           |  28 +
 xen/include/asm-riscv/sysregs.h          |  14 +
 xen/include/asm-riscv/system.h           |  96 ++++
 xen/include/asm-riscv/time.h             |  60 ++
 xen/include/asm-riscv/trace.h            |  12 +
 xen/include/asm-riscv/types.h            |  73 +++
 xen/include/asm-riscv/vm_event.h         |  61 ++
 xen/include/asm-riscv/xenoprof.h         |  12 +
 xen/include/public/arch-riscv.h          | 181 ++++++
 xen/include/public/arch-riscv/hvm/save.h |  39 ++
 xen/include/public/hvm/save.h            |   2 +
 xen/include/public/pmu.h                 |   2 +
 xen/include/public/xen.h                 |   2 +
 62 files changed, 5455 insertions(+)
 create mode 100644 xen/include/asm-riscv/altp2m.h
 create mode 100644 xen/include/asm-riscv/asm.h
 create mode 100644 xen/include/asm-riscv/atomic.h
 create mode 100644 xen/include/asm-riscv/bitops.h
 create mode 100644 xen/include/asm-riscv/bug.h
 create mode 100644 xen/include/asm-riscv/byteorder.h
 create mode 100644 xen/include/asm-riscv/cache.h
 create mode 100644 xen/include/asm-riscv/cmpxchg.h
 create mode 100644 xen/include/asm-riscv/config.h
 create mode 100644 xen/include/asm-riscv/csr.h
 create mode 100644 xen/include/asm-riscv/current.h
 create mode 100644 xen/include/asm-riscv/debugger.h
 create mode 100644 xen/include/asm-riscv/delay.h
 create mode 100644 xen/include/asm-riscv/desc.h
 create mode 100644 xen/include/asm-riscv/device.h
 create mode 100644 xen/include/asm-riscv/div64.h
 create mode 100644 xen/include/asm-riscv/domain.h
 create mode 100644 xen/include/asm-riscv/event.h
 create mode 100644 xen/include/asm-riscv/fence.h
 create mode 100644 xen/include/asm-riscv/flushtlb.h
 create mode 100644 xen/include/asm-riscv/grant_table.h
 create mode 100644 xen/include/asm-riscv/guest_access.h
 create mode 100644 xen/include/asm-riscv/guest_atomics.h
 create mode 100644 xen/include/asm-riscv/hardirq.h
 create mode 100644 xen/include/asm-riscv/hypercall.h
 create mode 100644 xen/include/asm-riscv/init.h
 create mode 100644 xen/include/asm-riscv/io.h
 create mode 100644 xen/include/asm-riscv/iocap.h
 create mode 100644 xen/include/asm-riscv/iommu.h
 create mode 100644 xen/include/asm-riscv/irq.h
 create mode 100644 xen/include/asm-riscv/mem_access.h
 create mode 100644 xen/include/asm-riscv/mm.h
 create mode 100644 xen/include/asm-riscv/monitor.h
 create mode 100644 xen/include/asm-riscv/nospec.h
 create mode 100644 xen/include/asm-riscv/numa.h
 create mode 100644 xen/include/asm-riscv/p2m.h
 create mode 100644 xen/include/asm-riscv/page.h
 create mode 100644 xen/include/asm-riscv/paging.h
 create mode 100644 xen/include/asm-riscv/pci.h
 create mode 100644 xen/include/asm-riscv/percpu.h
 create mode 100644 xen/include/asm-riscv/pgtable-bits.h
 create mode 100644 xen/include/asm-riscv/processor.h
 create mode 100644 xen/include/asm-riscv/random.h
 create mode 100644 xen/include/asm-riscv/regs.h
 create mode 100644 xen/include/asm-riscv/riscv_encoding.h
 create mode 100644 xen/include/asm-riscv/setup.h
 create mode 100644 xen/include/asm-riscv/smp.h
 create mode 100644 xen/include/asm-riscv/softirq.h
 create mode 100644 xen/include/asm-riscv/spinlock.h
 create mode 100644 xen/include/asm-riscv/string.h
 create mode 100644 xen/include/asm-riscv/sysregs.h
 create mode 100644 xen/include/asm-riscv/system.h
 create mode 100644 xen/include/asm-riscv/time.h
 create mode 100644 xen/include/asm-riscv/trace.h
 create mode 100644 xen/include/asm-riscv/types.h
 create mode 100644 xen/include/asm-riscv/vm_event.h
 create mode 100644 xen/include/asm-riscv/xenoprof.h
 create mode 100644 xen/include/public/arch-riscv.h
 create mode 100644 xen/include/public/arch-riscv/hvm/save.h
Patch

diff --git a/xen/include/asm-riscv/altp2m.h b/xen/include/asm-riscv/altp2m.h
new file mode 100644
index 0000000000..8554495f94
--- /dev/null
+++ b/xen/include/asm-riscv/altp2m.h
@@ -0,0 +1,39 @@ 
+/*
+ * Alternate p2m
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_RISCV_ALTP2M_H
+#define __ASM_RISCV_ALTP2M_H
+
+#include <xen/sched.h>
+
+/* Alternate p2m on/off per domain */
+static inline bool altp2m_active(const struct domain *d)
+{
+    /* Not implemented on RISC-V. */
+    return false;
+}
+
+/* Alternate p2m VCPU */
+static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
+{
+    /* Not implemented on RISC-V; should not be reached. */
+    BUG();
+    return 0;
+}
+
+#endif /* __ASM_RISCV_ALTP2M_H */
diff --git a/xen/include/asm-riscv/asm.h b/xen/include/asm-riscv/asm.h
new file mode 100644
index 0000000000..5ad4cb622b
--- /dev/null
+++ b/xen/include/asm-riscv/asm.h
@@ -0,0 +1,76 @@ 
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x)	x
+#else
+#define __ASM_STR(x)	#x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b)	__ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b)	__ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L		__REG_SEL(ld, lw)
+#define REG_S		__REG_SEL(sd, sw)
+#define SZREG		__REG_SEL(8, 4)
+#define LGREG		__REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR		.dword
+#define RISCV_SZPTR		8
+#define RISCV_LGPTR		3
+#else
+#define RISCV_PTR		".dword"
+#define RISCV_SZPTR		"8"
+#define RISCV_LGPTR		"3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR		.word
+#define RISCV_SZPTR		4
+#define RISCV_LGPTR		2
+#else
+#define RISCV_PTR		".word"
+#define RISCV_SZPTR		"4"
+#define RISCV_LGPTR		"2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define RISCV_INT		__ASM_STR(.word)
+#define RISCV_SZINT		__ASM_STR(4)
+#define RISCV_LGINT		__ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define RISCV_SHORT		__ASM_STR(.half)
+#define RISCV_SZSHORT		__ASM_STR(2)
+#define RISCV_LGSHORT		__ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#endif /* _ASM_RISCV_ASM_H */
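
Since __ASM_STR() stringizes its argument when compiled as C, the
width-selected mnemonics above can be pasted straight into inline
assembly. A minimal sketch (load_word is a hypothetical helper, not
part of the patch):

    /* Load one pointer-sized word with the xlen-appropriate
     * instruction: ld on RV64, lw on RV32. */
    static inline unsigned long load_word(const unsigned long *p)
    {
        unsigned long v;

        asm volatile(REG_L " %0, 0(%1)" : "=r" (v) : "r" (p));
        return v;
    }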
diff --git a/xen/include/asm-riscv/atomic.h b/xen/include/asm-riscv/atomic.h
new file mode 100644
index 0000000000..6a81365632
--- /dev/null
+++ b/xen/include/asm-riscv/atomic.h
@@ -0,0 +1,249 @@ 
+/**
+ * Copyright (c) 2018 Anup Patel.
+ * Copyright (c) 2019 Alistair Francis <alistair.francis@wdc.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _ASM_RISCV_ATOMIC_H
+#define _ASM_RISCV_ATOMIC_H
+
+#include <xen/atomic.h>
+#include <asm/cmpxchg.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))readb((uint8_t *)p); break;               \
+    case 2: __x = (typeof(*p))readw((uint16_t *)p); break;              \
+    case 4: __x = (typeof(*p))readl((uint32_t *)p); break;              \
+    case 8: __x = (typeof(*p))readq((uint64_t *)p); break;              \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
+
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: writeb((uint8_t)__x,  (uint8_t *)  p); break;              \
+    case 2: writew((uint16_t)__x, (uint16_t *) p); break;              \
+    case 4: writel((uint32_t)__x, (uint32_t *) p); break;              \
+    case 8: writeq((uint64_t)__x, (uint64_t *) p); break;              \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
+
+/* TODO: this read-modify-write sequence is not atomic; fix it. */
+#define add_sized(p, x) ({                                              \
+    typeof(*(p)) __x = (x);                                             \
+    switch ( sizeof(*(p)) )                                             \
+    {                                                                   \
+    case 1: writeb(read_atomic(p) + __x, (uint8_t *)(p)); break;        \
+    case 2: writew(read_atomic(p) + __x, (uint16_t *)(p)); break;       \
+    case 4: writel(read_atomic(p) + __x, (uint32_t *)(p)); break;       \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+})
+
+#if __riscv_xlen == 32
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__ (
+		"	amoadd.w zero, %1, %0"
+		: "+A" (v->counter)
+		: "r" (i)
+		: "memory");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"	amoadd.w.aqrl  %1, %2, %0"
+		: "+A" (v->counter), "=r" (ret)
+		: "r" (i)
+		: "memory");
+
+	return ret + i;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__ (
+		"	amoadd.w zero, %1, %0"
+		: "+A" (v->counter)
+		: "r" (-i)
+		: "memory");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"	amoadd.w.aqrl  %1, %2, %0"
+		: "+A" (v->counter), "=r" (ret)
+		: "r" (-i)
+		: "memory");
+
+	return ret - i;
+}
+#else
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__ (
+		"	amoadd.d zero, %1, %0"
+		: "+A" (v->counter)
+		: "r" (i)
+		: "memory");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"	amoadd.d.aqrl  %1, %2, %0"
+		: "+A" (v->counter), "=r" (ret)
+		: "r" (i)
+		: "memory");
+
+	return ret + i;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__ (
+		"	amoadd.d zero, %1, %0"
+		: "+A" (v->counter)
+		: "r" (-i)
+		: "memory");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"	amoadd.d.aqrl  %1, %2, %0"
+		: "+A" (v->counter), "=r" (ret)
+		: "r" (-i)
+		: "memory");
+
+	return ret - i;
+}
+#endif
+
+static inline int atomic_read(const atomic_t *v)
+{
+    return *(volatile int *)&v->counter;
+}
+
+static inline int _atomic_read(atomic_t v)
+{
+    return v.counter;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+    v->counter = i;
+}
+
+static inline void _atomic_set(atomic_t *v, int i)
+{
+    v->counter = i;
+}
+
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+    return atomic_sub_return(i, v) == 0;
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+    atomic_add(1, v);
+}
+
+static inline int atomic_inc_return(atomic_t *v)
+{
+    return atomic_add_return(1, v);
+}
+
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+    return atomic_add_return(1, v) == 0;
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+    atomic_sub(1, v);
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+    return atomic_sub_return(1, v);
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+    return atomic_sub_return(1, v) == 0;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+    return atomic_add_return(i, v) < 0;
+}
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr),				\
+				       _o_, _n_, sizeof(*(ptr)));	\
+})
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int prev, rc;
+
+	__asm__ __volatile__ (
+		"0:	lr.w     %[p],  %[c]\n"
+		"	beq      %[p],  %[u], 1f\n"
+		"	add      %[rc], %[p], %[a]\n"
+		"	sc.w.rl  %[rc], %[rc], %[c]\n"
+		"	bnez     %[rc], 0b\n"
+		"	fence    rw, rw\n"
+		"1:\n"
+		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+		: [a]"r" (a), [u]"r" (u)
+		: "memory");
+	return prev;
+}
+
+#endif /* _ASM_RISCV_ATOMIC_H */
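
As a usage sketch, the interface matches Xen's common atomic API; for
example, a saturating reference counter could be built on
atomic_add_unless() like this (illustrative only; assumes INT_MAX is
available):

    static atomic_t users;    /* atomic_t wraps a single int counter */

    static bool get_ref(void)
    {
        /* atomic_add_unless() returns the old value; the add is
         * skipped when the counter already equals INT_MAX. */
        return atomic_add_unless(&users, 1, INT_MAX) != INT_MAX;
    }

    static void put_ref(void)
    {
        if ( atomic_dec_and_test(&users) )
            ;    /* last reference dropped; release the object here */
    }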
diff --git a/xen/include/asm-riscv/bitops.h b/xen/include/asm-riscv/bitops.h
new file mode 100644
index 0000000000..f2f6f63b03
--- /dev/null
+++ b/xen/include/asm-riscv/bitops.h
@@ -0,0 +1,331 @@ 
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#include <asm/system.h>
+
+#define BIT_ULL(nr)		(1ULL << (nr))
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LLONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LLONG)
+#define BITS_PER_BYTE		8
+
+#define __set_bit(n,p)            set_bit(n,p)
+#define __clear_bit(n,p)          clear_bit(n,p)
+
+#define BITS_PER_WORD           32
+
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit()  smp_mb()
+#define smp_mb__after_clear_bit()   smp_mb()
+#endif /* smp_mb__before_clear_bit */
+
+#if (BITS_PER_LONG == 64)
+#define __AMO(op)	"amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op)	"amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
+({								\
+	unsigned long __res, __mask;				\
+	__mask = BIT_MASK(nr);					\
+	__asm__ __volatile__ (					\
+		__AMO(op) #ord " %0, %2, %1"			\
+		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
+		: "r" (mod(__mask))				\
+		: "memory");					\
+	((__res & __mask) != 0);				\
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord)			\
+	__asm__ __volatile__ (					\
+		__AMO(op) #ord " zero, %1, %0"			\
+		: "+A" (addr[BIT_WORD(nr)])			\
+		: "r" (mod(BIT_MASK(nr)))			\
+		: "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) 			\
+	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+#define __op_bit(op, mod, nr, addr)				\
+	__op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x)	(x)
+#define __NOT(x)	(~(x))
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int __test_and_set_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	return __test_and_op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int __test_and_clear_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int __test_and_change_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	return __test_and_op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	__op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	__op_bit(and, __NOT, nr, addr);
+}
+
+static inline int test_bit(int nr, const volatile void *p)
+{
+        const volatile unsigned int *addr = (const volatile unsigned int *)p;
+
+        /* Index 32-bit words to match BITS_PER_WORD, not BITS_PER_LONG. */
+        return 1UL & (addr[nr / BITS_PER_WORD] >> (nr & (BITS_PER_WORD-1)));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() may be reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	__op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(
+	unsigned long nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(
+	unsigned long nr, volatile void *p)
+{
+	volatile unsigned long *addr = p;
+
+	__op_bit_ord(and, __NOT, nr, addr, .rl);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock(); however, it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock, however it would only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ *
+ * On RISC-V systems there seems to be no benefit to taking advantage of the
+ * non-atomic property here: it's a lot more instructions and we still have to
+ * provide release semantics anyway.
+ */
+static inline void __clear_bit_unlock(
+	unsigned long nr, volatile unsigned long *addr)
+{
+	clear_bit_unlock(nr, addr);
+}
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+static inline int fls(unsigned int x)
+{
+    return generic_fls(x);
+}
+
+static inline int flsl(unsigned long x)
+{
+   return generic_flsl(x);
+}
+
+
+#define test_and_set_bit   __test_and_set_bit
+#define test_and_clear_bit __test_and_clear_bit
+
+/* Based on linux/include/asm-generic/bitops/find.h */
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+		size, unsigned long offset);
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+		long size, unsigned long offset);
+#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+				    unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+					 unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); })
+#define ffsl(x) ({ unsigned long __t = (x); flsl(__t & -__t); })
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  ffs(~(x))
+
+/**
+ * find_first_set_bit - find the first set bit in @word
+ * @word: the word to search
+ *
+ * Returns the bit-number of the first set bit (first bit being 0).
+ * The input must *not* be zero.
+ */
+static inline unsigned int find_first_set_bit(unsigned long word)
+{
+        return ffsl(word) - 1;
+}
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* _ASM_RISCV_BITOPS_H */
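
The .aq/.rl ordered variants above compose directly into a bit
spinlock; a hedged sketch (cpu_relax() is assumed to exist as a pause
primitive and is not part of this patch):

    static inline void bit_spin_lock(unsigned long nr, volatile void *word)
    {
        /* Acquire semantics come from test_and_set_bit_lock()'s .aq AMO. */
        while ( test_and_set_bit_lock(nr, word) )
            cpu_relax();
    }

    static inline void bit_spin_unlock(unsigned long nr, volatile void *word)
    {
        /* Release semantics come from clear_bit_unlock()'s .rl AMO. */
        clear_bit_unlock(nr, word);
    }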
diff --git a/xen/include/asm-riscv/bug.h b/xen/include/asm-riscv/bug.h
new file mode 100644
index 0000000000..387832f9bf
--- /dev/null
+++ b/xen/include/asm-riscv/bug.h
@@ -0,0 +1,59 @@ 
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_BUG_H
+#define _ASM_RISCV_BUG_H
+
+#define BUGFRAME_NR     3
+
+struct bug_frame {
+    signed int loc_disp;    /* Relative address to the bug address */
+    signed int file_disp;   /* Relative address to the filename */
+    signed int msg_disp;    /* Relative address to the predicate (for ASSERT) */
+    uint16_t line;          /* Line number */
+    uint32_t pad0:16;       /* Padding to 8-byte alignment */
+};
+
+#ifndef __ASSEMBLY__
+
+#define BUG()							\
+do {								\
+	__asm__ __volatile__ ("ebreak\n");			\
+	unreachable();						\
+} while (0)
+
+#define WARN()							\
+do {								\
+	__asm__ __volatile__ ("ebreak\n");			\
+} while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+struct task_struct;
+
+#endif /* !__ASSEMBLY__ */
+
+#define assert_failed(msg) do {                                \
+    BUG();                                                     \
+    unreachable();                                             \
+} while (0)
+
+extern const struct bug_frame __start_bug_frames[],
+                              __stop_bug_frames_0[],
+                              __stop_bug_frames_1[],
+                              __stop_bug_frames_2[];
+
+#endif /* _ASM_RISCV_BUG_H */
diff --git a/xen/include/asm-riscv/byteorder.h b/xen/include/asm-riscv/byteorder.h
new file mode 100644
index 0000000000..320a03c88f
--- /dev/null
+++ b/xen/include/asm-riscv/byteorder.h
@@ -0,0 +1,16 @@ 
+#ifndef __ASM_RISCV_BYTEORDER_H__
+#define __ASM_RISCV_BYTEORDER_H__
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+#endif /* __ASM_RISCV_BYTEORDER_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/cache.h b/xen/include/asm-riscv/cache.h
new file mode 100644
index 0000000000..1fdc702828
--- /dev/null
+++ b/xen/include/asm-riscv/cache.h
@@ -0,0 +1,24 @@ 
+/*
+ * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CACHE_H
+#define _ASM_RISCV_CACHE_H
+
+#define L1_CACHE_SHIFT		6
+
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __section(".data.read_mostly")
+
+#endif /* _ASM_RISCV_CACHE_H */
diff --git a/xen/include/asm-riscv/cmpxchg.h b/xen/include/asm-riscv/cmpxchg.h
new file mode 100644
index 0000000000..713f1f785e
--- /dev/null
+++ b/xen/include/asm-riscv/cmpxchg.h
@@ -0,0 +1,382 @@ 
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <asm/system.h>
+#include <asm/fence.h>
+#include <xen/lib.h>
+
+#define __xchg_relaxed(ptr, new, size)					\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(new) __new = (new);					\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.w %0, %2, %1\n"			\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.d %0, %2, %1\n"			\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define xchg_relaxed(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
+					    _x_, sizeof(*(ptr)));	\
+})
+
+#define __xchg_acquire(ptr, new, size)					\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(new) __new = (new);					\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.w %0, %2, %1\n"			\
+			RISCV_ACQUIRE_BARRIER				\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.d %0, %2, %1\n"			\
+			RISCV_ACQUIRE_BARRIER				\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define xchg_acquire(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg_acquire((ptr),			\
+					    _x_, sizeof(*(ptr)));	\
+})
+
+#define __xchg_release(ptr, new, size)					\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(new) __new = (new);					\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			RISCV_RELEASE_BARRIER				\
+			"	amoswap.w %0, %2, %1\n"			\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			RISCV_RELEASE_BARRIER				\
+			"	amoswap.d %0, %2, %1\n"			\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define xchg_release(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg_release((ptr),			\
+					    _x_, sizeof(*(ptr)));	\
+})
+
+#define __xchg(ptr, new, size)						\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(new) __new = (new);					\
+	__typeof__(*(ptr)) __ret;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.w.aqrl %0, %2, %1\n"		\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"	amoswap.d.aqrl %0, %2, %1\n"		\
+			: "=r" (__ret), "+A" (*__ptr)			\
+			: "r" (__new)					\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define xchg(ptr, x)							\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));	\
+})
+
+#define xchg32(ptr, x)							\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
+	xchg((ptr), (x));						\
+})
+
+#define xchg64(ptr, x)							\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	xchg((ptr), (x));						\
+})
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __cmpxchg_relaxed(ptr, old, new, size)				\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __ret;					\
+	register unsigned int __rc;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.w %0, %2\n"				\
+			"	bne  %0, %z3, 1f\n"			\
+			"	sc.w %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.d %0, %2\n"				\
+			"	bne %0, %z3, 1f\n"			\
+			"	sc.d %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define cmpxchg_relaxed(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
+					_o_, _n_, sizeof(*(ptr)));	\
+})
+
+#define __cmpxchg_acquire(ptr, old, new, size)				\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __ret;					\
+	register unsigned int __rc;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.w %0, %2\n"				\
+			"	bne  %0, %z3, 1f\n"			\
+			"	sc.w %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			RISCV_ACQUIRE_BARRIER				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.d %0, %2\n"				\
+			"	bne %0, %z3, 1f\n"			\
+			"	sc.d %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			RISCV_ACQUIRE_BARRIER				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define cmpxchg_acquire(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
+					_o_, _n_, sizeof(*(ptr)));	\
+})
+
+#define __cmpxchg_release(ptr, old, new, size)				\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __ret;					\
+	register unsigned int __rc;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			RISCV_RELEASE_BARRIER				\
+			"0:	lr.w %0, %2\n"				\
+			"	bne  %0, %z3, 1f\n"			\
+			"	sc.w %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			RISCV_RELEASE_BARRIER				\
+			"0:	lr.d %0, %2\n"				\
+			"	bne %0, %z3, 1f\n"			\
+			"	sc.d %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define cmpxchg_release(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_release((ptr),			\
+					_o_, _n_, sizeof(*(ptr)));	\
+})
+
+#define __cmpxchg(ptr, old, new, size)					\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __ret;					\
+	register unsigned int __rc;					\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.w %0, %2\n"				\
+			"	bne  %0, %z3, 1f\n"			\
+			"	sc.w.rl %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"	fence rw, rw\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__ (					\
+			"0:	lr.d %0, %2\n"				\
+			"	bne %0, %z3, 1f\n"			\
+			"	sc.d.rl %1, %z4, %2\n"			\
+			"	bnez %1, 0b\n"				\
+			"	fence rw, rw\n"				\
+			"1:\n"						\
+			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
+			: "rJ" (__old), "rJ" (__new)			\
+			: "memory");					\
+		break;							\
+	default:							\
+		ASSERT_UNREACHABLE();					\
+	}								\
+	__ret;								\
+})
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr),				\
+				       _o_, _n_, sizeof(*(ptr)));	\
+})
+
+#define cmpxchg_local(ptr, o, n)					\
+	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg32(ptr, o, n)						\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
+	cmpxchg((ptr), (o), (n));					\
+})
+
+#define cmpxchg32_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
+	cmpxchg_relaxed((ptr), (o), (n));				\
+})
+
+#define cmpxchg64(ptr, o, n)						\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg((ptr), (o), (n));					\
+})
+
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_relaxed((ptr), (o), (n));				\
+})
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
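
As the block comment above notes, success is detected by comparing the
returned value against the expected old value. A typical retry loop
using the full-barrier cmpxchg() (illustrative only; saturating_inc is
not part of the patch):

    static inline void saturating_inc(uint32_t *ctr)
    {
        uint32_t old, prev;

        do {
            old = read_atomic(ctr);
            if ( old == 0xffffffffU )
                return;              /* already saturated */
            prev = cmpxchg(ctr, old, old + 1);
        } while ( prev != old );     /* lost a race: retry */
    }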
diff --git a/xen/include/asm-riscv/config.h b/xen/include/asm-riscv/config.h
new file mode 100644
index 0000000000..fe5b058fd4
--- /dev/null
+++ b/xen/include/asm-riscv/config.h
@@ -0,0 +1,203 @@ 
+/******************************************************************************
+ * config.h
+ *
+ * A Linux-style configuration list.
+ */
+
+#ifndef __RISCV_CONFIG_H__
+#define __RISCV_CONFIG_H__
+
+#include <xen/const.h>
+
+/*
+ * RISC-V Layout:
+ *   0  -   2M   Unmapped
+ *   2M -   4M   Xen text, data, bss
+ *   4M -   6M   Fixmap: special-purpose 4K mapping slots
+ *   6M -  10M   Early boot mapping of FDT
+ *   10M - 12M   Early relocation address (used when relocating Xen)
+ *               and later for livepatch vmap (if compiled in)
+ *
+ *   All of the above is mapped in L2 slot[0] (except for Unmapped)
+ *
+ *   1G - 2G   VMAP: ioremap and early_ioremap (L2 slot 2)
+ *
+ *   2G - 5G: Unused
+ *
+ *   5G - 8G
+ *   0x140000000 - 0x200000000
+ *   Frametable: 24 bytes per page for 372GB of RAM, GB-aligned (slightly over 2GB, L2 slots [5..8))
+ *
+ *   8G - 12G : Unused
+ *
+ *   0x300000000  - 0x5fffffffff : 372GB, L2 slots [12..384)
+ *   1:1 mapping of RAM
+ *
+ *   0x6000000000 - 0x7fffffffff : 128GB, L2 slots [384..512)
+ *   Unused
+ */
+
+
+#if defined(CONFIG_RISCV_64)
+# define LONG_BYTEORDER 3
+# define ELFSIZE 64
+#else
+# define LONG_BYTEORDER 2
+# define ELFSIZE 32
+#endif
+
+#define BYTES_PER_LONG (1 << LONG_BYTEORDER)
+#define BITS_PER_LONG (BYTES_PER_LONG << 3)
+#define POINTER_ALIGN BYTES_PER_LONG
+
+#define BITS_PER_LLONG 64
+
+#ifdef CONFIG_RISCV_64
+#define PADDR_BITS              39
+#else
+#define PADDR_BITS              32
+#endif
+#define PADDR_MASK              ((1ULL << PADDR_BITS)-1)
+
+#define VADDR_BITS              32
+#define VADDR_MASK              (~0UL)
+
+#define PAGE_SHIFT	(12)
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE - 1))
+
+#ifdef CONFIG_RISCV_64
+
+/* Bit counts for virtual address fields (sv39) */
+#define VPN2_BITS   (9)
+#define VPN1_BITS   (9)
+#define VPN0_BITS   (9)
+#define OFFSET_BITS (12)
+
+/* SLOT2_ENTRY_BITS == 30 */
+#define SLOT2_ENTRY_BITS  (VPN1_BITS + VPN2_BITS + OFFSET_BITS) 
+#define SLOT2(slot) (_AT(vaddr_t,slot) << SLOT2_ENTRY_BITS)
+#define SLOT2_ENTRY_SIZE  SLOT2(1)
+
+#define DIRECTMAP_VIRT_START   SLOT2(12)
+
+/* See above "RISC-V Layout" for a description of the layout (and
+ * where these magic numbers come from). */
+#define DIRECTMAP_SIZE         (SLOT2_ENTRY_SIZE * (384-12))
+#define DIRECTMAP_VIRT_END     (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1)
+#define XENHEAP_VIRT_START     xenheap_virt_start
+#define HYPERVISOR_VIRT_END    DIRECTMAP_VIRT_END
+
+#else /* RISCV_32 */
+#error "RISC-V 32-bit is not supported yet"
+#define XENHEAP_VIRT_START     _AT(vaddr_t,0x40000000)
+#define XENHEAP_VIRT_END       _AT(vaddr_t,0x7fffffff)
+#define DOMHEAP_VIRT_START     _AT(vaddr_t,0x80000000)
+#define DOMHEAP_VIRT_END       _AT(vaddr_t,0xffffffff)
+#endif
+
+/* xen_ulong_t is always 64 bits */
+#define BITS_PER_XEN_ULONG 64
+
+#define CONFIG_PAGING_LEVELS 3
+
+#define CONFIG_RISCV 1
+
+#define CONFIG_RISCV_L1_CACHE_SHIFT 7 /* XXX */
+
+#define CONFIG_SMP 1
+
+#define CONFIG_IRQ_HAS_MULTIPLE_ACTION 1
+
+#define CONFIG_PAGEALLOC_MAX_ORDER 18
+#define CONFIG_DOMU_MAX_ORDER      9
+#define CONFIG_HWDOM_MAX_ORDER     10
+
+#define OPT_CONSOLE_STR "dtuart"
+
+#ifdef CONFIG_RISCV_64
+#define MAX_VIRT_CPUS 128u
+#else
+#define MAX_VIRT_CPUS 8u
+#endif
+
+#define XEN_VIRT_START         _AT(vaddr_t,0x00200000)
+
+#define HYPERVISOR_VIRT_START  XEN_VIRT_START
+
+#define INVALID_VCPU_ID MAX_VIRT_CPUS
+
+#define STACK_ORDER 3
+#define STACK_SIZE  (PAGE_SIZE << STACK_ORDER)
+
+#define VMAP_VIRT_START  GB(1)
+#define VMAP_VIRT_END    (VMAP_VIRT_START + GB(1))
+
+#define FRAMETABLE_VIRT_START  GB(5)
+#define FRAMETABLE_SIZE        GB(1)
+#define FRAMETABLE_NR          (FRAMETABLE_SIZE / sizeof(*frame_table))
+#define FRAMETABLE_VIRT_END    (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1)
+
+#ifndef ASM_NL
+#define ASM_NL		 ;
+#endif
+
+#ifndef __ALIGN
+/* 0x90 is the x86 NOP fill byte and has no meaning on RISC-V. */
+#define __ALIGN		.align 4
+#define __ALIGN_STR	".align 4"
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+
+#ifndef GLOBAL
+#define GLOBAL(name) \
+	.globl name ASM_NL \
+	name:
+#endif
+
+#ifndef ENTRY
+#define ENTRY(name) \
+	.globl name ASM_NL \
+	ALIGN ASM_NL \
+	name:
+#endif
+
+#ifndef WEAK
+#define WEAK(name)	   \
+	.weak name ASM_NL   \
+	ALIGN ASM_NL \
+	name:
+#endif
+
+#ifndef END
+#define END(name) \
+	.size name, .-name
+#endif
+
+/* If symbol 'name' is treated as a subroutine (gets called, and returns)
+ * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
+ * static analysis tools such as stack depth analyzer.
+ */
+#ifndef ENDPROC
+#define ENDPROC(name) \
+	.type name, @function ASM_NL \
+	END(name)
+#endif
+
+#define __PAGE_ALIGNED_DATA	.section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS	.section ".bss..page_aligned", "aw"
+
+#endif
+
+#endif /* __RISCV_CONFIG_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/csr.h b/xen/include/asm-riscv/csr.h
new file mode 100644
index 0000000000..1d4c50f5f6
--- /dev/null
+++ b/xen/include/asm-riscv/csr.h
@@ -0,0 +1,117 @@ 
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <asm/riscv_encoding.h>
+
+/* Status register flags */
+#define SR_SIE	0x00000002UL /* Supervisor Interrupt Enable */
+#define SR_SPIE	0x00000020UL /* Previous Supervisor IE */
+#define SR_SPP	0x00000100UL /* Previously Supervisor */
+#define SR_SUM	0x00040000UL /* Supervisor may access User Memory */
+
+#define SR_FS           0x00006000UL /* Floating-point Status */
+#define SR_FS_OFF       0x00000000UL
+#define SR_FS_INITIAL   0x00002000UL
+#define SR_FS_CLEAN     0x00004000UL
+#define SR_FS_DIRTY     0x00006000UL
+
+#define SR_XS           0x00018000UL /* Extension Status */
+#define SR_XS_OFF       0x00000000UL
+#define SR_XS_INITIAL   0x00008000UL
+#define SR_XS_CLEAN     0x00010000UL
+#define SR_XS_DIRTY     0x00018000UL
+
+#ifndef CONFIG_RISCV_64
+#define SR_SD   0x80000000UL /* FS/XS dirty */
+#else
+#define SR_SD   0x8000000000000000UL /* FS/XS dirty */
+#endif
+
+#define EXC_INST_MISALIGNED     0
+#define EXC_INST_ACCESS         1
+#define EXC_BREAKPOINT          3
+#define EXC_LOAD_ACCESS         5
+#define EXC_STORE_ACCESS        7
+#define EXC_SYSCALL             8
+#define EXC_INST_PAGE_FAULT     12
+#define EXC_LOAD_PAGE_FAULT     13
+#define EXC_STORE_PAGE_FAULT    15
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val)					\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrrw %0, " #csr ", %1"		\
+			      : "=r" (__v) : "rK" (__v)		\
+			      : "memory");			\
+	__v;							\
+})
+
+#define csr_read(csr)						\
+({								\
+	register unsigned long __v;				\
+	__asm__ __volatile__ ("csrr %0, " #csr			\
+			      : "=r" (__v) :			\
+			      : "memory");			\
+	__v;							\
+})
+
+#define csr_write(csr, val)					\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrw " #csr ", %0"		\
+			      : : "rK" (__v)			\
+			      : "memory");			\
+})
+
+#define csr_read_set(csr, val)					\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrrs %0, " #csr ", %1"		\
+			      : "=r" (__v) : "rK" (__v)		\
+			      : "memory");			\
+	__v;							\
+})
+
+#define csr_set(csr, val)					\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrs " #csr ", %0"		\
+			      : : "rK" (__v)			\
+			      : "memory");			\
+})
+
+#define csr_read_clear(csr, val)				\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrrc %0, " #csr ", %1"		\
+			      : "=r" (__v) : "rK" (__v)		\
+			      : "memory");			\
+	__v;							\
+})
+
+#define csr_clear(csr, val)					\
+({								\
+	unsigned long __v = (unsigned long)(val);		\
+	__asm__ __volatile__ ("csrc " #csr ", %0"		\
+			      : : "rK" (__v)			\
+			      : "memory");			\
+})
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
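
The accessors above map one-to-one onto the Zicsr instructions, so
S-mode interrupt masking reduces to single CSR operations. A sketch
(these helpers are illustrative, not part of the patch):

    static inline void local_irq_enable(void)
    {
        csr_set(sstatus, SR_SIE);               /* csrs sstatus */
    }

    static inline unsigned long local_irq_save(void)
    {
        /* Atomically clear SIE and return the previous sstatus value. */
        return csr_read_clear(sstatus, SR_SIE); /* csrrc */
    }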
diff --git a/xen/include/asm-riscv/current.h b/xen/include/asm-riscv/current.h
new file mode 100644
index 0000000000..ddfeaaf5f5
--- /dev/null
+++ b/xen/include/asm-riscv/current.h
@@ -0,0 +1,50 @@ 
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <xen/percpu.h>
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+
+struct vcpu;
+
+/* Which VCPU is "current" on this PCPU. */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+
+#define current            (this_cpu(curr_vcpu))
+#define set_current(vcpu)  do { current = (vcpu); } while (0)
+
+/* Per-VCPU state that lives at the top of the stack */
+struct cpu_info {
+    struct cpu_user_regs guest_cpu_user_regs;
+    unsigned long elr;
+    uint32_t flags;
+};
+
+static inline struct cpu_info *get_cpu_info(void)
+{
+    register unsigned long sp asm ("sp");
+    return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) +
+                               STACK_SIZE - sizeof(struct cpu_info));
+}
+
+#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+
+/* TODO: stub; should switch to the new stack and jump to fn, e.g.
+ * "mv sp, %0; j fn" on RISC-V. For now just wait for an interrupt. */
+#define switch_stack_and_jump(stack, fn)                                \
+	wait_for_interrupt();
+
+#define reset_stack_and_jump(fn) switch_stack_and_jump(get_cpu_info(), fn)
+
+DECLARE_PER_CPU(unsigned int, cpu_id);
+
+#define get_processor_id()    (this_cpu(cpu_id))
+#define set_processor_id(id)  do {                      \
+    csr_write(sscratch, __per_cpu_offset[id]);      \
+    this_cpu(cpu_id) = (id);                            \
+} while(0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
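
get_cpu_info() relies on every stack being STACK_SIZE-aligned, so the
struct can be recovered from any sp within the stack. A worked example
with the values defined in config.h (STACK_SIZE = 32KB):

    /*
     * Assume sp = 0x8000c123 somewhere in the current stack:
     *   sp & ~(STACK_SIZE - 1)  -> 0x80008000  (base of this stack)
     *   + STACK_SIZE            -> 0x80010000  (one past the top)
     *   - sizeof(struct cpu_info)              (cpu_info at stack top)
     */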
diff --git a/xen/include/asm-riscv/debugger.h b/xen/include/asm-riscv/debugger.h
new file mode 100644
index 0000000000..af4fc8a838
--- /dev/null
+++ b/xen/include/asm-riscv/debugger.h
@@ -0,0 +1,15 @@ 
+#ifndef __RISCV_DEBUGGER_H__
+#define __RISCV_DEBUGGER_H__
+
+#define debugger_trap_fatal(v, r) (0)
+#define debugger_trap_immediate() ((void) 0)
+
+#endif /* __RISCV_DEBUGGER_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/delay.h b/xen/include/asm-riscv/delay.h
new file mode 100644
index 0000000000..cbb0c9eb96
--- /dev/null
+++ b/xen/include/asm-riscv/delay.h
@@ -0,0 +1,28 @@ 
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2016 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_DELAY_H
+#define _ASM_RISCV_DELAY_H
+
+extern unsigned long riscv_timebase;
+
+#define udelay udelay
+extern void udelay(unsigned long usecs);
+
+#define ndelay ndelay
+extern void ndelay(unsigned long nsecs);
+
+extern void __delay(unsigned long cycles);
+
+#endif /* _ASM_RISCV_DELAY_H */
diff --git a/xen/include/asm-riscv/desc.h b/xen/include/asm-riscv/desc.h
new file mode 100644
index 0000000000..a4d02d5eef
--- /dev/null
+++ b/xen/include/asm-riscv/desc.h
@@ -0,0 +1,12 @@ 
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#endif /* __ARCH_DESC_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/device.h b/xen/include/asm-riscv/device.h
new file mode 100644
index 0000000000..e38d2a9712
--- /dev/null
+++ b/xen/include/asm-riscv/device.h
@@ -0,0 +1,15 @@ 
+#ifndef __ASM_RISCV_DEVICE_H
+#define __ASM_RISCV_DEVICE_H
+
+typedef struct device device_t;
+
+#endif /* __ASM_RISCV_DEVICE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/div64.h b/xen/include/asm-riscv/div64.h
new file mode 100644
index 0000000000..0a88dd30ad
--- /dev/null
+++ b/xen/include/asm-riscv/div64.h
@@ -0,0 +1,23 @@ 
+#ifndef __ASM_RISCV_DIV64
+#define __ASM_RISCV_DIV64
+
+#include <asm/system.h>
+#include <xen/types.h>
+
+# define do_div(n,base) ({                                      \
+        uint32_t __base = (base);                               \
+        uint32_t __rem;                                         \
+        __rem = ((uint64_t)(n)) % __base;                       \
+        (n) = ((uint64_t)(n)) / __base;                         \
+        __rem;                                                  \
+ })
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/domain.h b/xen/include/asm-riscv/domain.h
new file mode 100644
index 0000000000..5d87197af4
--- /dev/null
+++ b/xen/include/asm-riscv/domain.h
@@ -0,0 +1,85 @@ 
+#ifndef __ASM_DOMAIN_H__
+#define __ASM_DOMAIN_H__
+
+#include <xen/cache.h>
+#include <xen/sched.h>
+#include <asm/page.h>
+#include <asm/p2m.h>
+#include <public/hvm/params.h>
+#include <xen/serial.h>
+#include <xen/rbtree.h>
+
+struct hvm_domain
+{
+    uint64_t              params[HVM_NR_PARAMS];
+};
+
+#ifdef CONFIG_RISCV_64
+enum domain_type {
+    DOMAIN_32BIT,
+    DOMAIN_64BIT,
+};
+#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT)
+#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT)
+#else
+#define is_32bit_domain(d) (1)
+#define is_64bit_domain(d) (0)
+#endif
+
+/* The hardware domain always has its memory direct mapped. */
+#define is_domain_direct_mapped(d) ((d) == hardware_domain)
+
+struct vtimer {
+        struct vcpu *v;
+        int irq;
+        struct timer timer;
+        uint32_t ctl;
+        uint64_t cval;
+};
+
+struct arch_domain
+{
+    /* Virtual MMU */
+    struct p2m_domain p2m;
+
+    struct hvm_domain hvm;
+
+}  __cacheline_aligned;
+
+struct arch_vcpu
+{
+
+    /*
+     * Points into ->stack, more convenient than doing pointer arith
+     * all the time.
+     */
+    struct cpu_info *cpu_info;
+
+}  __cacheline_aligned;
+
+void vcpu_show_execution_state(struct vcpu *);
+void vcpu_show_registers(const struct vcpu *);
+
+static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
+{
+    return xmalloc(struct vcpu_guest_context);
+}
+
+static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
+{
+    xfree(vgc);
+}
+
+static inline void arch_vcpu_block(struct vcpu *v) {}
+
+#endif /* __ASM_DOMAIN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/event.h b/xen/include/asm-riscv/event.h
new file mode 100644
index 0000000000..88e10f414b
--- /dev/null
+++ b/xen/include/asm-riscv/event.h
@@ -0,0 +1,42 @@ 
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+#include <xen/errno.h>
+#include <asm/domain.h>
+#include <asm/bug.h>
+
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
+void vcpu_update_evtchn_irq(struct vcpu *v);
+void vcpu_block_unless_event_pending(struct vcpu *v);
+
+static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
+{
+    return 0;
+}
+
+static inline int local_events_need_delivery(void)
+{
+    return 0;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+
+}
+
+/* No arch specific virq definition now. Default to global. */
+static inline bool arch_virq_is_global(unsigned int virq)
+{
+    return true;
+}
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/fence.h b/xen/include/asm-riscv/fence.h
new file mode 100644
index 0000000000..2b443a3a48
--- /dev/null
+++ b/xen/include/asm-riscv/fence.h
@@ -0,0 +1,12 @@ 
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#ifdef CONFIG_SMP
+#define RISCV_ACQUIRE_BARRIER		"\tfence r, rw\n"
+#define RISCV_RELEASE_BARRIER		"\tfence rw, w\n"
+#else
+#define RISCV_ACQUIRE_BARRIER
+#define RISCV_RELEASE_BARRIER
+#endif
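+
+/*
+ * These barriers are meant to be spliced into inline assembly around
+ * LR/SC sequences (as in the Linux sources these definitions are taken
+ * from).  A sketch of an acquire-flavoured compare-and-swap:
+ *
+ *     "0: lr.w %0, %2\n"
+ *     "   bne  %0, %z3, 1f\n"
+ *     "   sc.w %1, %z4, %2\n"
+ *     "   bnez %1, 0b\n"
+ *     RISCV_ACQUIRE_BARRIER
+ *     "1:\n"
+ */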
+
+#endif	/* _ASM_RISCV_FENCE_H */
diff --git a/xen/include/asm-riscv/flushtlb.h b/xen/include/asm-riscv/flushtlb.h
new file mode 100644
index 0000000000..4709625dad
--- /dev/null
+++ b/xen/include/asm-riscv/flushtlb.h
@@ -0,0 +1,56 @@ 
+#ifndef __ASM_RISCV_FLUSHTLB_H__
+#define __ASM_RISCV_FLUSHTLB_H__
+
+#include <xen/cpumask.h>
+#include <public/domctl.h>
+
+/*
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
+ *
+ * XXX: lazy implementation for now -- doesn't filter anything.
+ */
+static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) {}
+
+#define tlbflush_current_time()                 (0)
+
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    page->tlbflush_timestamp = tlbflush_current_time();
+}
+
+/* Flush specified CPUs' TLBs */
+void flush_tlb_mask(const cpumask_t *mask);
+
+/* Flush local TLBs, current VMID only */
+static inline void flush_guest_tlb_local(void)
+{
+    /* TODO */
+}
+
+/* Flush innershareable TLBs, current VMID only */
+static inline void flush_guest_tlb(void)
+{
+    /* TODO */
+}
+
+/* Flush local TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_all_guests_tlb_local(void)
+{
+    /* TODO */
+}
+
+/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
+static inline void flush_all_guests_tlb(void)
+{
+    /* TODO */
+}
+
+#endif /* __ASM_RISCV_FLUSHTLB_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/grant_table.h b/xen/include/asm-riscv/grant_table.h
new file mode 100644
index 0000000000..40607ea2c8
--- /dev/null
+++ b/xen/include/asm-riscv/grant_table.h
@@ -0,0 +1,93 @@ 
+#ifndef __ASM_GRANT_TABLE_H__
+#define __ASM_GRANT_TABLE_H__
+
+#include <xen/grant_table.h>
+#include <xen/kernel.h>
+#include <xen/pfn.h>
+#include <asm/current.h>
+
+#define INITIAL_NR_GRANT_FRAMES 1U
+#define GNTTAB_MAX_VERSION 1
+
+struct grant_table_arch {
+    gfn_t *shared_gfn;
+    gfn_t *status_gfn;
+};
+
+void gnttab_clear_flags(struct domain *d,
+                        unsigned long nr, uint16_t *addr);
+int create_grant_host_mapping(unsigned long gpaddr, mfn_t mfn,
+                              unsigned int flags, unsigned int cache_flags);
+#define gnttab_host_mapping_get_page_type(ro, ld, rd) (0)
+int replace_grant_host_mapping(unsigned long gpaddr, mfn_t mfn,
+                               unsigned long new_gpaddr, unsigned int flags);
+void gnttab_mark_dirty(struct domain *d, mfn_t mfn);
+#define gnttab_release_host_mappings(domain) 1
+
+/*
+ * The region of memory used by Xen will never be mapped into DOM0's
+ * memory layout, so it can be used for the grant table.
+ *
+ * Only use the text section, as it is always present and will contain
+ * enough space for a large grant table.
+ */
+#define gnttab_dom0_frames()                                             \
+    min_t(unsigned int, opt_max_grant_frames, PFN_DOWN(_etext - _stext))
+
+#define gnttab_init_arch(gt)                                             \
+({                                                                       \
+    unsigned int ngf_ = (gt)->max_grant_frames;                          \
+    unsigned int nsf_ = grant_to_status_frames(ngf_);                    \
+                                                                         \
+    (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_);                  \
+    (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_);                  \
+    if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn )                \
+    {                                                                    \
+        while ( ngf_-- )                                                 \
+            (gt)->arch.shared_gfn[ngf_] = INVALID_GFN;                   \
+        while ( nsf_-- )                                                 \
+            (gt)->arch.status_gfn[nsf_] = INVALID_GFN;                   \
+    }                                                                    \
+    else                                                                 \
+        gnttab_destroy_arch(gt);                                         \
+    (gt)->arch.shared_gfn ? 0 : -ENOMEM;                                 \
+})
+
+#define gnttab_destroy_arch(gt)                                          \
+    do {                                                                 \
+        XFREE((gt)->arch.shared_gfn);                                    \
+        XFREE((gt)->arch.status_gfn);                                    \
+    } while ( 0 )
+
+#define gnttab_set_frame_gfn(gt, st, idx, gfn)                           \
+    do {                                                                 \
+        ((st) ? (gt)->arch.status_gfn : (gt)->arch.shared_gfn)[idx] =    \
+            (gfn);                                                       \
+    } while ( 0 )
+
+#define gnttab_get_frame_gfn(gt, st, idx) ({                             \
+   (st) ? gnttab_status_gfn(NULL, gt, idx)                               \
+        : gnttab_shared_gfn(NULL, gt, idx);                              \
+})
+
+#define gnttab_shared_gfn(d, t, i)                                       \
+    (((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i])
+
+#define gnttab_status_gfn(d, t, i)                                       \
+    (((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
+
+#define gnttab_need_iommu_mapping(d)                    \
+    (is_domain_direct_mapped(d) && need_iommu_pt_sync(d))
+
+#endif /* __ASM_GRANT_TABLE_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/guest_access.h b/xen/include/asm-riscv/guest_access.h
new file mode 100644
index 0000000000..029e189d4d
--- /dev/null
+++ b/xen/include/asm-riscv/guest_access.h
@@ -0,0 +1,164 @@ 
+#ifndef __ASM_RISCV_GUEST_ACCESS_H__
+#define __ASM_RISCV_GUEST_ACCESS_H__
+
+#include <xen/guest_access.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+
+unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len);
+unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
+                                             unsigned len);
+unsigned long raw_copy_from_guest(void *to, const void *from, unsigned len);
+unsigned long raw_clear_guest(void *to, unsigned len);
+
+/* Copy data to guest physical address, then clean the region. */
+unsigned long copy_to_guest_phys_flush_dcache(struct domain *d,
+                                              paddr_t phys,
+                                              void *buf,
+                                              unsigned int len);
+
+int access_guest_memory_by_ipa(struct domain *d, paddr_t ipa, void *buf,
+                               uint32_t size, bool is_write);
+
+#define __raw_copy_to_guest raw_copy_to_guest
+#define __raw_copy_from_guest raw_copy_from_guest
+#define __raw_clear_guest raw_clear_guest
+
+/* Remainder copied from ARM and x86 -- could be common? */
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd)        ((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
+#define guest_handle_subtract_offset(hnd, nr) ((hnd).p -= (nr))
+
+/* Cast a guest handle (either XEN_GUEST_HANDLE or XEN_GUEST_HANDLE_PARAM)
+ * to the specified type of XEN_GUEST_HANDLE_PARAM. */
+#define guest_handle_cast(hnd, type) ({         \
+    type *_x = (hnd).p;                         \
+    (XEN_GUEST_HANDLE_PARAM(type)) { _x };      \
+})
+
+/* Cast a XEN_GUEST_HANDLE to XEN_GUEST_HANDLE_PARAM */
+#define guest_handle_to_param(hnd, type) ({                  \
+    typeof((hnd).p) _x = (hnd).p;                            \
+    XEN_GUEST_HANDLE_PARAM(type) _y = { _x };                \
+    /* type checking: make sure that the pointers inside     \
+     * XEN_GUEST_HANDLE and XEN_GUEST_HANDLE_PARAM are of    \
+     * the same type, then return hnd */                     \
+    (void)(&_x == &_y.p);                                    \
+    _y;                                                      \
+})
+
+/* Cast a XEN_GUEST_HANDLE_PARAM to XEN_GUEST_HANDLE */
+#define guest_handle_from_param(hnd, type) ({               \
+    typeof((hnd).p) _x = (hnd).p;                           \
+    XEN_GUEST_HANDLE(type) _y = { _x };                     \
+    /* type checking: make sure that the pointers inside    \
+     * XEN_GUEST_HANDLE and XEN_GUEST_HANDLE_PARAM are of   \
+     * the same type, then return hnd */                    \
+    (void)(&_x == &_y.p);                                   \
+    _y;                                                     \
+})
+
+#define guest_handle_for_field(hnd, type, fld)          \
+    ((XEN_GUEST_HANDLE(type)) { &(hnd).p->fld })
+
+#define guest_handle_from_ptr(ptr, type)        \
+    ((XEN_GUEST_HANDLE_PARAM(type)) { (type *)ptr })
+#define const_guest_handle_from_ptr(ptr, type)  \
+    ((XEN_GUEST_HANDLE_PARAM(const_##type)) { (const type *)ptr })
+
+/*
+ * Copy an array of objects to guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, off, ptr, nr) ({      \
+    const typeof(*(ptr)) *_s = (ptr);                   \
+    char (*_d)[sizeof(*_s)] = (void *)(hnd).p;          \
+    ((void)((hnd).p == (ptr)));                         \
+    raw_copy_to_guest(_d+(off), _s, sizeof(*_s)*(nr));  \
+})
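+
+/*
+ * Sketch of a typical use ('ents' and 'hnd' are illustrative names):
+ *
+ *     if ( copy_to_guest_offset(hnd, i, ents, nr) )
+ *         return -EFAULT;
+ *
+ * As with the raw_copy_* helpers, a non-zero result is the number of
+ * bytes that could not be copied.
+ */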
+
+/*
+ * Clear an array of objects in guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define clear_guest_offset(hnd, off, nr) ({    \
+    void *_d = (hnd).p;                        \
+    raw_clear_guest(_d+(off), nr);             \
+})
+
+/*
+ * Copy an array of objects from guest context via a guest handle,
+ * specifying an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, off, nr) ({    \
+    const typeof(*(ptr)) *_s = (hnd).p;                 \
+    typeof(*(ptr)) *_d = (ptr);                         \
+    raw_copy_from_guest(_d, _s+(off), sizeof(*_d)*(nr));\
+})
+
+/* Copy sub-field of a structure to guest context via a guest handle. */
+#define copy_field_to_guest(hnd, ptr, field) ({         \
+    const typeof(&(ptr)->field) _s = &(ptr)->field;     \
+    void *_d = &(hnd).p->field;                         \
+    ((void)(&(hnd).p->field == &(ptr)->field));         \
+    raw_copy_to_guest(_d, _s, sizeof(*_s));             \
+})
+
+/* Copy sub-field of a structure from guest context via a guest handle. */
+#define copy_field_from_guest(ptr, hnd, field) ({       \
+    const typeof(&(ptr)->field) _s = &(hnd).p->field;   \
+    typeof(&(ptr)->field) _d = &(ptr)->field;           \
+    raw_copy_from_guest(_d, _s, sizeof(*_d));           \
+})
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+#define guest_handle_okay(hnd, nr) (1)
+#define guest_handle_subrange_okay(hnd, first, last) (1)
+
+#define __copy_to_guest_offset(hnd, off, ptr, nr) ({    \
+    const typeof(*(ptr)) *_s = (ptr);                   \
+    char (*_d)[sizeof(*_s)] = (void *)(hnd).p;          \
+    ((void)((hnd).p == (ptr)));                         \
+    __raw_copy_to_guest(_d+(off), _s, sizeof(*_s)*(nr));\
+})
+
+#define __clear_guest_offset(hnd, off, ptr, nr) ({      \
+    void *_d = (hnd).p;                                 \
+    __raw_clear_guest(_d+(off), nr);                    \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, off, nr) ({  \
+    const typeof(*(ptr)) *_s = (hnd).p;                 \
+    typeof(*(ptr)) *_d = (ptr);                         \
+    __raw_copy_from_guest(_d, _s+(off), sizeof(*_d)*(nr));\
+})
+
+#define __copy_field_to_guest(hnd, ptr, field) ({       \
+    const typeof(&(ptr)->field) _s = &(ptr)->field;     \
+    void *_d = &(hnd).p->field;                         \
+    ((void)(&(hnd).p->field == &(ptr)->field));         \
+    __raw_copy_to_guest(_d, _s, sizeof(*_s));           \
+})
+
+#define __copy_field_from_guest(ptr, hnd, field) ({     \
+    const typeof(&(ptr)->field) _s = &(hnd).p->field;   \
+    typeof(&(ptr)->field) _d = &(ptr)->field;           \
+    __raw_copy_from_guest(_d, _s, sizeof(*_d));         \
+})
+
+#endif /* __ASM_RISCV_GUEST_ACCESS_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/guest_atomics.h b/xen/include/asm-riscv/guest_atomics.h
new file mode 100644
index 0000000000..45064a1b10
--- /dev/null
+++ b/xen/include/asm-riscv/guest_atomics.h
@@ -0,0 +1,62 @@ 
+#ifndef _RISCV_GUEST_ATOMICS_H
+#define _RISCV_GUEST_ATOMICS_H
+
+/*
+ * TODO: implement guest atomics
+ */
+
+/* TODO */
+#define guest_testop(name)                                                  \
+static inline int guest_##name(struct domain *d, int nr, volatile void *p)  \
+{                                                                           \
+    (void) d;                                                               \
+    (void) nr;                                                              \
+    (void) p;                                                               \
+                                                                            \
+    return 0;                                                               \
+}
+
+/* TODO */
+#define guest_bitop(name)                                                   \
+static inline void guest_##name(struct domain *d, int nr, volatile void *p) \
+{                                                                           \
+    (void) d;                                                               \
+    (void) nr;                                                              \
+    (void) p;                                                               \
+}
+
+guest_bitop(set_bit)
+guest_bitop(clear_bit)
+guest_bitop(change_bit)
+
+#undef guest_bitop
+
+guest_testop(test_and_set_bit)
+guest_testop(test_and_clear_bit)
+guest_testop(test_and_change_bit)
+
+#undef guest_testop
+
+#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p))
+
+static inline int guest_cmpxchg(void *d, void *ptr, uint32_t o, uint32_t n)
+{
+    (void) d;
+    (void) ptr;
+    (void) o;
+    (void) n;
+
+    return 0;
+}
+
+#endif /* _RISCV_GUEST_ATOMICS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/hardirq.h b/xen/include/asm-riscv/hardirq.h
new file mode 100644
index 0000000000..67b6a673db
--- /dev/null
+++ b/xen/include/asm-riscv/hardirq.h
@@ -0,0 +1,27 @@ 
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <xen/cache.h>
+#include <xen/smp.h>
+
+typedef struct {
+        unsigned long __softirq_pending;
+        unsigned int __local_irq_count;
+} __cacheline_aligned irq_cpustat_t;
+
+#include <xen/irq_cpustat.h>    /* Standard mappings for irq_cpustat_t above */
+
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
+#define irq_enter()     (local_irq_count(smp_processor_id())++)
+#define irq_exit()      (local_irq_count(smp_processor_id())--)
+
+#endif /* __ASM_HARDIRQ_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/hypercall.h b/xen/include/asm-riscv/hypercall.h
new file mode 100644
index 0000000000..8af474b5e2
--- /dev/null
+++ b/xen/include/asm-riscv/hypercall.h
@@ -0,0 +1,12 @@ 
+#ifndef __ASM_RISCV_HYPERCALL_H__
+#define __ASM_RISCV_HYPERCALL_H__
+
+#endif /* __ASM_RISCV_HYPERCALL_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/init.h b/xen/include/asm-riscv/init.h
new file mode 100644
index 0000000000..8410ccead3
--- /dev/null
+++ b/xen/include/asm-riscv/init.h
@@ -0,0 +1,42 @@ 
+#ifndef _XEN_ASM_INIT_H
+#define _XEN_ASM_INIT_H
+
+#ifndef __ASSEMBLY__
+
+struct init_info
+{
+    /* Pointer to the stack, used by head.S when entering C */
+    unsigned char *stack;
+    /* Logical CPU ID, used by start_secondary */
+    unsigned int cpuid;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/* For assembly routines */
+#define __HEAD		.section	".head.text","ax"
+#define __INIT		.section	".init.text","ax"
+#define __FINIT		.previous
+
+#define __INITDATA	.section	".init.data","aw",%progbits
+#define __INITRODATA	.section	".init.rodata","a",%progbits
+#define __FINITDATA	.previous
+
+#define __MEMINIT        .section	".meminit.text", "ax"
+#define __MEMINITDATA    .section	".meminit.data", "aw"
+#define __MEMINITRODATA  .section	".meminit.rodata", "a"
+
+/* silence warnings when references are OK */
+#define __REF            .section       ".ref.text", "ax"
+#define __REFDATA        .section       ".ref.data", "aw"
+#define __REFCONST       .section       ".ref.rodata", "a"
+
+#endif /* _XEN_ASM_INIT_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/io.h b/xen/include/asm-riscv/io.h
new file mode 100644
index 0000000000..92d17ebfa8
--- /dev/null
+++ b/xen/include/asm-riscv/io.h
@@ -0,0 +1,283 @@ 
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ *   which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_IO_H
+#define _ASM_RISCV_IO_H
+
+#include <asm/byteorder.h>
+
+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions.  This should be fixed by the
+ * upcoming platform spec.
+ */
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_wc(addr, size) ioremap((addr), (size))
+#define ioremap_wt(addr, size) ioremap((addr), (size))
+
+/* Generic IO read/write.  These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+	u8 val;
+
+	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+	u16 val;
+
+	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+	u32 val;
+
+	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+	u64 val;
+
+	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+	return val;
+}
+#endif
+
+/*
+ * Unordered I/O memory access primitives.  These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
+#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v,c)		((void)__raw_writeb((v),(c)))
+#define writew_cpu(v,c)		((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
+#define writel_cpu(v,c)		((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v,c)		((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses.  These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses. Since the platform specification
+ * defines that all I/O regions are strongly ordered on channel 2, no explicit
+ * fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr()		do {} while (0)
+#define __io_rar()		do {} while (0)
+#define __io_rbw()		do {} while (0)
+#define __io_raw()		do {} while (0)
+
+#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v,c)	({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
+#define writew_relaxed(v,c)	({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
+#define writel_relaxed(v,c)	({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v,c)	({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.  The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br()	do {} while (0)
+#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
+#define __io_aw()	do { } while (0)
+
+#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
+
+#define writeb(v,c)	({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
+#define writew(v,c)	({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
+#define writel(v,c)	({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
+#define writeq(v,c)	({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
+#endif
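+
+/*
+ * Sketch of the intended use (names are illustrative): the fences above
+ * order a doorbell write after the normal-memory writes it publishes,
+ *
+ *     desc->addr = buf_maddr;          => plain store to normal memory
+ *     writel(RING_GO, ring_doorbell);  => "fence w,o" orders the stores
+ *                                         before this MMIO write
+ *
+ * while readl() is ordered before any subsequent normal-memory reads by
+ * the "fence i,r" in __io_ar().
+ */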
+
+/*
+ * Emulation routines for the port-mapped IO space used by some PCI drivers.
+ * These are defined as being "fully synchronous", but also "not guaranteed to
+ * be fully ordered with respect to other memory and I/O operations".  We're
+ * going to be on the safe side here and just make them:
+ *  - Fully ordered WRT each other, by bracketing them with two fences.  The
+ *    outer set contains both I/O so inX is ordered with outX, while the inner just
+ *    needs the type of the access (I for inX and O for outX).
+ *  - Ordered in the same manner as readX/writeX WRT memory by subsuming their
+ *    fences.
+ *  - Ordered WRT timer reads, so udelay and friends don't get elided by the
+ *    implementation.
+ * Note that there is no way to actually enforce that outX is a non-posted
+ * operation on RISC-V, but hopefully the timer ordering constraint is
+ * sufficient to ensure this works sanely on controllers that support I/O
+ * writes.
+ */
+#define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
+#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
+#define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");
+
+#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+
+#define outb(v,c)	({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outw(v,c)	({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outl(v,c)	({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+
+#ifdef CONFIG_64BIT
+#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
+#define outq(v,c)	({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
+#endif
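+
+/*
+ * Illustrative example (port and value are arbitrary): the timer
+ * ordering above means a udelay() following an outX() cannot be
+ * reordered before the write:
+ *
+ *     outb(0x0a, 0x3f8 + 1);  => poke a hypothetical UART register
+ *     udelay(10);             => starts only after the write is issued
+ */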
+
+/*
+ * Accesses from a single hart to a single I/O address must be ordered.  This
+ * allows us to use the raw read macros, but we still need to fence before and
+ * after the block to ensure ordering WRT other macros.  These are defined to
+ * perform host-endian accesses so we use __raw instead of __cpu.
+ */
+#define __io_reads_ins(port, ctype, len, bfence, afence)			\
+	static inline void __ ## port ## len(const volatile void __iomem *addr,	\
+					     void *buffer,			\
+					     unsigned int count)		\
+	{									\
+		bfence;								\
+		if (count) {							\
+			ctype *buf = buffer;					\
+										\
+			do {							\
+				ctype x = __raw_read ## len(addr);		\
+				*buf++ = x;					\
+			} while (--count);					\
+		}								\
+		afence;								\
+	}
+
+#define __io_writes_outs(port, ctype, len, bfence, afence)			\
+	static inline void __ ## port ## len(volatile void __iomem *addr,	\
+					     const void *buffer,		\
+					     unsigned int count)		\
+	{									\
+		bfence;								\
+		if (count) {							\
+			const ctype *buf = buffer;				\
+										\
+			do {							\
+				__raw_write ## len(*buf++, addr);		\
+			} while (--count);					\
+		}								\
+		afence;								\
+	}
+
+__io_reads_ins(reads,  u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
+#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
+#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
+#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
+
+__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
+
+__io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
+__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
+__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
+#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
+#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
+#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
+
+__io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
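+
+/*
+ * For example, a driver might drain a 32-bit RX FIFO ('fifo' is an
+ * illustrative name) with:
+ *
+ *     uint32_t buf[16];
+ *     readsl(fifo, buf, 16);  => 16 ordered 32-bit reads of one address
+ */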
+
+#ifdef CONFIG_64BIT
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
+#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
+
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
+#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
+
+__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
+#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
+
+__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
+#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
+#endif
+
+#endif /* _ASM_RISCV_IO_H */
diff --git a/xen/include/asm-riscv/iocap.h b/xen/include/asm-riscv/iocap.h
new file mode 100644
index 0000000000..712f34528e
--- /dev/null
+++ b/xen/include/asm-riscv/iocap.h
@@ -0,0 +1,16 @@ 
+#ifndef __RISCV_IOCAP_H__
+#define __RISCV_IOCAP_H__
+
+#define cache_flush_permitted(d)                        \
+    (!rangeset_is_empty((d)->iomem_caps))
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/iommu.h b/xen/include/asm-riscv/iommu.h
new file mode 100644
index 0000000000..fbd212e1d7
--- /dev/null
+++ b/xen/include/asm-riscv/iommu.h
@@ -0,0 +1,49 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ARCH_RISCV_IOMMU_H__
+#define __ARCH_RISCV_IOMMU_H__
+
+struct arch_iommu
+{
+    /* Private information for the IOMMU drivers */
+    void *priv;
+};
+
+/* Always share P2M Table between the CPU and the IOMMU */
+#define iommu_use_hap_pt(d) (has_iommu_pt(d))
+
+const struct iommu_ops *iommu_get_ops(void);
+void iommu_set_ops(const struct iommu_ops *ops);
+
+#endif /* __ARCH_RISCV_IOMMU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/irq.h b/xen/include/asm-riscv/irq.h
new file mode 100644
index 0000000000..ae17872d4d
--- /dev/null
+++ b/xen/include/asm-riscv/irq.h
@@ -0,0 +1,58 @@ 
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+#include <public/device_tree_defs.h>
+
+/*
+ * These defines correspond to the Xen internal representation of the
+ * IRQ types. We choose to make them the same as the existing device
+ * tree definitions for convenience.
+ */
+#define IRQ_TYPE_NONE           DT_IRQ_TYPE_NONE
+#define IRQ_TYPE_EDGE_RISING    DT_IRQ_TYPE_EDGE_RISING
+#define IRQ_TYPE_EDGE_FALLING   DT_IRQ_TYPE_EDGE_FALLING
+#define IRQ_TYPE_EDGE_BOTH      DT_IRQ_TYPE_EDGE_BOTH
+#define IRQ_TYPE_LEVEL_HIGH     DT_IRQ_TYPE_LEVEL_HIGH
+#define IRQ_TYPE_LEVEL_LOW      DT_IRQ_TYPE_LEVEL_LOW
+#define IRQ_TYPE_LEVEL_MASK     DT_IRQ_TYPE_LEVEL_MASK
+#define IRQ_TYPE_SENSE_MASK     DT_IRQ_TYPE_SENSE_MASK
+#define IRQ_TYPE_INVALID        DT_IRQ_TYPE_INVALID
+
+#define NR_LOCAL_IRQS	32
+#define NR_IRQS		1024
+
+typedef struct {
+} vmask_t;
+
+struct arch_pirq
+{
+};
+
+struct arch_irq_desc {
+};
+
+struct irq_desc;
+
+struct irq_desc *__irq_to_desc(int irq);
+
+#define irq_to_desc(irq)    __irq_to_desc(irq)
+
+void arch_move_irqs(struct vcpu *v);
+
+#define domain_pirq_to_irq(d, pirq) (pirq)
+
+extern const unsigned int nr_irqs;
+#define nr_static_irqs NR_IRQS
+#define arch_hwdom_irqs(domid) NR_IRQS
+
+#define arch_evtchn_bind_pirq(d, pirq) ((void)((d) + (pirq)))
+
+#endif /* _ASM_HW_IRQ_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/mem_access.h b/xen/include/asm-riscv/mem_access.h
new file mode 100644
index 0000000000..aa7decb629
--- /dev/null
+++ b/xen/include/asm-riscv/mem_access.h
@@ -0,0 +1,35 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _ASM_RISCV_MEM_ACCESS_H
+#define _ASM_RISCV_MEM_ACCESS_H
+
+#include <xen/errno.h>
+
+/* vm_event and mem_access are supported on any RISC-V guest */
+static inline bool p2m_mem_access_sanity_check(struct domain *d)
+{
+    return true;
+}
+
+#endif /* _ASM_RISCV_MEM_ACCESS_H */
diff --git a/xen/include/asm-riscv/mm.h b/xen/include/asm-riscv/mm.h
new file mode 100644
index 0000000000..7be9cd306a
--- /dev/null
+++ b/xen/include/asm-riscv/mm.h
@@ -0,0 +1,308 @@ 
+#ifndef __ARCH_RISCV_MM__
+#define __ARCH_RISCV_MM__
+
+#include <xen/kernel.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <public/xen.h>
+#include <xen/pdx.h>
+#include <xen/errno.h>
+
+/* Align Xen to a 2 MiB boundary. */
+#define XEN_PADDR_ALIGN (1 << 21)
+
+/* TODO: Rewrite this file to be correct */
+
+/*
+ * Per-page-frame information.
+ *
+ * Every architecture must ensure the following:
+ *  1. 'struct page_info' contains a 'struct page_list_entry list'.
+ *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
+ */
+#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
+
+extern unsigned long frametable_base_pdx;
+
+struct page_info
+{
+    /* Each frame can be threaded onto a doubly-linked list. */
+    struct page_list_entry list;
+
+    /* Reference count and various PGC_xxx flags and fields. */
+    unsigned long count_info;
+
+    /* Context-dependent fields follow... */
+    union {
+        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
+        struct {
+            /* Type reference count and various PGT_xxx flags and fields. */
+            unsigned long type_info;
+        } inuse;
+        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
+        union {
+            struct {
+                /*
+                 * Index of the first *possibly* unscrubbed page in the buddy.
+                 * One more bit than maximum possible order to accommodate
+                 * INVALID_DIRTY_IDX.
+                 */
+#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
+                unsigned long first_dirty:MAX_ORDER + 1;
+
+                /* Do TLBs need flushing for safety before next page use? */
+                bool need_tlbflush:1;
+
+#define BUDDY_NOT_SCRUBBING    0
+#define BUDDY_SCRUBBING        1
+#define BUDDY_SCRUB_ABORT      2
+                unsigned long scrub_state:2;
+            };
+
+            unsigned long val;
+        } free;
+
+    } u;
+
+    union {
+        /* Page is in use, but not as a shadow. */
+        struct {
+            /* Owner of this page (zero if page is anonymous). */
+            struct domain *domain;
+        } inuse;
+
+        /* Page is on a free list. */
+        struct {
+            /* Order-size of the free chunk this page is the head of. */
+            unsigned int order;
+        } free;
+
+    } v;
+
+    union {
+        /*
+         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+         * Only valid for: a) free pages, and b) pages with zero type count
+         */
+        u32 tlbflush_timestamp;
+    };
+    u64 pad;
+
+};
+
+#define PG_shift(idx)   (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
+#define PGT_none          PG_mask(0, 1)  /* no special uses of this page   */
+#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?         */
+#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.                 */
+
+ /* Count of uses of this frame as its current type. */
+#define PGT_count_width   PG_shift(2)
+#define PGT_count_mask    ((1UL<<PGT_count_width)-1)
+
+ /* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated    PG_shift(1)
+#define PGC_allocated     PG_mask(1, 1)
+  /* Page is Xen heap? */
+#define _PGC_xen_heap     PG_shift(2)
+#define PGC_xen_heap      PG_mask(1, 2)
+/* ... */
+/* Page is broken? */
+#define _PGC_broken       PG_shift(7)
+#define PGC_broken        PG_mask(1, 7)
+ /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state         PG_mask(3, 9)
+#define PGC_state_inuse   PG_mask(0, 9)
+#define PGC_state_offlining PG_mask(1, 9)
+#define PGC_state_offlined PG_mask(2, 9)
+#define PGC_state_free    PG_mask(3, 9)
+#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+
+/* Count of references to this frame. */
+#define PGC_count_width   PG_shift(9)
+#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
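+
+/*
+ * For example, with BITS_PER_LONG == 64 the count_info layout is:
+ * bit 63 PGC_allocated, bit 62 PGC_xen_heap, bit 57 PGC_broken,
+ * bits 56:55 PGC_state and bits 54:0 the reference count.
+ */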
+
+extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
+extern vaddr_t xenheap_virt_end;
+extern vaddr_t xenheap_virt_start;
+
+#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
+#define is_xen_heap_mfn(mfn) \
+    (mfn_valid(_mfn(mfn)) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+
+#define is_xen_fixed_mfn(mfn)                                   \
+    ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) &&       \
+     (mfn_to_maddr(mfn) <= virt_to_maddr(&_end)))
+
+#define page_get_owner(_p)    (_p)->v.inuse.domain
+#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))
+
+#define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
+
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
+#define PDX_GROUP_SHIFT (PAGE_SHIFT + 9)
+
+/* XXX -- account for base */
+#define mfn_valid(mfn)        0
+
+/* Convert between machine frame numbers and page-info structures. */
+/*
+ * NOTE: mfn_to_page(mfn) evaluates to
+ *       FRAMETABLE_VIRT_START + sizeof(struct page_info) * mfn_to_pdx(mfn)
+ */
+#define mfn_to_page(mfn)                                            \
+    (frame_table + (mfn_to_pdx(mfn)))
+#define page_to_mfn(pg)                                             \
+    pdx_to_mfn((unsigned long)((pg) - frame_table))
+
+/* Convert between machine addresses and page-info structures. */
+#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
+#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg)))
+
+/* Convert between frame number and address formats.  */
+#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
+#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
+#define paddr_to_pdx(pa)    mfn_to_pdx(maddr_to_mfn(pa))
+#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
+#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
+#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
+#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
+#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
+
+extern unsigned long max_page;
+extern unsigned long total_pages;
+extern unsigned long xenheap_base_pdx;
+
+/* Page-align address and convert to frame number format */
+#define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
+
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) <
+           (DIRECTMAP_SIZE >> PAGE_SHIFT));
+
+    return (void *)(XENHEAP_VIRT_START -
+                    (xenheap_base_pdx << PAGE_SHIFT) +
+                    ((ma & ma_va_bottom_mask) |
+                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
+}
+
+static inline paddr_t __virt_to_maddr(vaddr_t va)
+{
+    unsigned long heap_phys_start = mfn_to_maddr(xenheap_mfn_start);
+
+    /* TODO: check that va really is a xenheap address; spin forever if not */
+    while ( va < XENHEAP_VIRT_START );
+
+    return (paddr_t) (va - XENHEAP_VIRT_START + heap_phys_start);
+}
+
+#define virt_to_maddr(va) __virt_to_maddr((vaddr_t) (va))
+
+/* Convert between Xen-heap virtual addresses and machine frame numbers. */
+#define __virt_to_mfn(va)  paddr_to_pfn(virt_to_maddr(va))
+#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
+
+/*
+ * Page needs to be scrubbed. Since this bit can only be set on a page that is
+ * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
+ */
+#define _PGC_need_scrub   _PGC_allocated
+#define PGC_need_scrub    PGC_allocated
+
+/*
+ * We define non-underscored wrappers for the above conversion functions.
+ * These are overridden in various source files while the underscored
+ * versions remain intact.
+ */
+#define virt_to_mfn(va)     __virt_to_mfn(va)
+#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
+
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+static inline struct page_info *virt_to_page(const void *v)
+{
+    unsigned long va = (unsigned long)v;
+    unsigned long pdx;
+
+    ASSERT(va >= XENHEAP_VIRT_START);
+    ASSERT(va < xenheap_virt_end);
+
+    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
+    pdx += pfn_to_pdx(mfn_x(xenheap_mfn_start));
+    return frame_table + pdx;
+}
+
+static inline void *page_to_virt(const struct page_info *pg)
+{
+    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+}
+
+#define domain_set_alloc_bitsize(d) ((void)0)
+#define domain_clamp_alloc_bitsize(d, b) (b)
+
+/*
+ * RISC-V does not have an M2P, but common code expects a handful of
+ * M2P-related defines and functions. Provide dummy versions of these.
+ */
+#define INVALID_M2P_ENTRY        (~0UL)
+#define SHARED_M2P_ENTRY         (~0UL - 1UL)
+#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
+
+/* Xen always owns P2M on RISC-V */
+#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0)
+#define mfn_to_gmfn(_d, mfn)  (mfn)
+
+/* Arch-specific portion of memory_op hypercall. */
+long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
+
+extern void put_page_type(struct page_info *page);
+static inline void put_page_and_type(struct page_info *page)
+{
+    put_page_type(page);
+    put_page(page);
+}
+
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                          unsigned int order);
+
+unsigned long domain_get_maximum_gpfn(struct domain *d);
+
+static inline
+int arch_acquire_resource(struct domain *d, unsigned int type, unsigned int id,
+                          unsigned long frame, unsigned int nr_frames,
+                          xen_pfn_t mfn_list[])
+{
+    return -EOPNOTSUPP;
+}
+
+/*
+ * On RISC-V, all the RAM is currently direct mapped in Xen.
+ * Hence return always true.
+ */
+static inline bool arch_mfn_in_directmap(unsigned long mfn)
+{
+    return true;
+}
+
+void setup_xenheap_mappings(unsigned long heap_start, unsigned long page_cnt);
+
+void setup_frametable_mappings(paddr_t ps, paddr_t pe);
+
+void __attribute__ ((section(".entry")))
+setup_initial_pagetables(pte_t *second,
+                         pte_t *first,
+                         pte_t *zeroeth,
+                         unsigned long map_start,
+                         unsigned long map_end,
+                         unsigned long pa_start);
+
+#endif /*  __ARCH_RISCV_MM__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/monitor.h b/xen/include/asm-riscv/monitor.h
new file mode 100644
index 0000000000..e77d21dba4
--- /dev/null
+++ b/xen/include/asm-riscv/monitor.h
@@ -0,0 +1,65 @@ 
+/*
+ * include/asm-riscv/monitor.h
+ *
+ * Arch-specific monitor_op domctl handler.
+ *
+ * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_RISCV_MONITOR_H__
+#define __ASM_RISCV_MONITOR_H__
+
+#include <xen/sched.h>
+#include <public/domctl.h>
+
+static inline
+void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace)
+{
+}
+
+static inline
+int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
+{
+    /* No arch-specific monitor ops on RISC-V. */
+    return -EOPNOTSUPP;
+}
+
+int arch_monitor_domctl_event(struct domain *d,
+                              struct xen_domctl_monitor_op *mop);
+
+static inline
+int arch_monitor_init_domain(struct domain *d)
+{
+    /* No arch-specific domain initialization on RISC-V. */
+    return 0;
+}
+
+static inline
+void arch_monitor_cleanup_domain(struct domain *d)
+{
+    /* No arch-specific domain cleanup on RISC-V. */
+}
+
+static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
+{
+    uint32_t capabilities = 0;
+
+    return capabilities;
+}
+
+int monitor_smc(void);
+
+#endif /* __ASM_RISCV_MONITOR_H__ */
diff --git a/xen/include/asm-riscv/nospec.h b/xen/include/asm-riscv/nospec.h
new file mode 100644
index 0000000000..55087fa831
--- /dev/null
+++ b/xen/include/asm-riscv/nospec.h
@@ -0,0 +1,25 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. */
+
+#ifndef _ASM_RISCV_NOSPEC_H
+#define _ASM_RISCV_NOSPEC_H
+
+static inline bool evaluate_nospec(bool condition)
+{
+    return condition;
+}
+
+static inline void block_speculation(void)
+{
+}
+
+#endif /* _ASM_RISCV_NOSPEC_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/numa.h b/xen/include/asm-riscv/numa.h
new file mode 100644
index 0000000000..52bdfbc16b
--- /dev/null
+++ b/xen/include/asm-riscv/numa.h
@@ -0,0 +1,41 @@ 
+#ifndef __ARCH_RISCV_NUMA_H
+#define __ARCH_RISCV_NUMA_H
+
+#include <xen/mm.h>
+
+typedef u8 nodeid_t;
+
+/* Fake one node for now. See also node_online_map. */
+#define cpu_to_node(cpu) 0
+#define node_to_cpumask(node)   (cpu_online_map)
+
+static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
+{
+    return 0;
+}
+
+/*
+ * TODO: make first_valid_mfn static when NUMA is supported on RISC-V; it
+ * is currently exported only because the dummy helpers below use it.
+ */
+extern mfn_t first_valid_mfn;
+
+/* XXX: implement NUMA support */
+#define node_spanned_pages(nid) (max_page - mfn_x(first_valid_mfn))
+#define node_start_pfn(nid) (mfn_x(first_valid_mfn))
+#define __node_distance(a, b) (20)
+
+static inline unsigned int arch_get_dma_bitsize(void)
+{
+    return 32;
+}
+
+#endif /* __ARCH_RISCV_NUMA_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/p2m.h b/xen/include/asm-riscv/p2m.h
new file mode 100644
index 0000000000..9e6db24ea6
--- /dev/null
+++ b/xen/include/asm-riscv/p2m.h
@@ -0,0 +1,410 @@ 
+#ifndef _XEN_P2M_H
+#define _XEN_P2M_H
+
+#include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <xen/rwlock.h>
+#include <xen/mem_access.h>
+#include <xen/errno.h>
+
+#include <asm/current.h>
+
+#define paddr_bits PADDR_BITS
+
+/* Holds the bit size of IPAs in p2m tables.  */
+extern unsigned int p2m_ipa_bits;
+
+struct domain;
+
+extern void memory_type_changed(struct domain *);
+
+/* Per-p2m-table state */
+struct p2m_domain {
+    /*
+     * Lock that protects updates to the p2m.
+     */
+    rwlock_t lock;
+
+    /* Pages used to construct the p2m */
+    struct page_list_head pages;
+
+    /* The root of the p2m tree. May be concatenated */
+    struct page_info *root;
+
+    /* Current VMID in use */
+    uint16_t vmid;
+
+    /* Current Translation Table Base Register for the p2m */
+    uint64_t vttbr;
+
+    /*
+     * Highest guest frame that's ever been mapped in the p2m
+     * Only takes into account ram and foreign mapping
+     */
+    gfn_t max_mapped_gfn;
+
+    /*
+     * Lowest mapped gfn in the p2m. When releasing mapped gfns in a
+     * preemptible manner this is updated to track where to resume the
+     * search. Apart from during teardown this can only decrease.
+     */
+    gfn_t lowest_mapped_gfn;
+
+    /* Indicate if it is required to clean the cache when writing an entry */
+    bool clean_pte;
+
+    /*
+     * P2M updates may require TLBs to be flushed (invalidated).
+     *
+     * Flushes may be deferred by setting 'need_flush' and then flushing
+     * when the p2m write lock is released.
+     *
+     * If an immediate flush is required (e.g., if a superpage is
+     * shattered), call p2m_tlb_flush_sync().
+     */
+    bool need_flush;
+
+    /* Gather some statistics for information purposes only */
+    struct {
+        /* Number of mappings at each p2m tree level */
+        unsigned long mappings[4];
+        /* Number of times we have shattered a mapping
+         * at each p2m tree level. */
+        unsigned long shattered[4];
+    } stats;
+
+    /*
+     * If true, and an access fault comes in and there is no vm_event listener,
+     * pause domain. Otherwise, remove access restrictions.
+     */
+    bool access_required;
+
+    /* Defines if mem_access is in use for the domain. */
+    bool mem_access_enabled;
+
+    /*
+     * Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t.
+     */
+    p2m_access_t default_access;
+
+    /*
+     * Radix tree to store the p2m_access_t settings as the PTEs don't have
+     * enough available bits to store this information.
+     */
+    struct radix_tree_root mem_access_settings;
+
+    /* back pointer to domain */
+    struct domain *domain;
+
+    /* Track, for each CPU, which vCPU of this domain last ran on it */
+    uint8_t last_vcpu_ran[NR_CPUS];
+};
+
+/*
+ * List of possible types for each page in the p2m entry.
+ * The pte has 4 bits available for this purpose, so only 16 types are
+ * possible. If we run out of values in the future, higher values can be
+ * used for pseudo-types that are not stored in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
+    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
+    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
+    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
+    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
+} p2m_type_t;
+
+/* We use bitmaps and mask to handle groups of types */
+#define p2m_to_mask(_t) (1UL << (_t))
+
+/* RAM types, which map to real machine frames */
+#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) |        \
+                       p2m_to_mask(p2m_ram_ro))
+
+/* Grant mapping types, which map to a real frame in another VM */
+#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) |  \
+                         p2m_to_mask(p2m_grant_map_ro))
+
+/* Foreign mappings types */
+#define P2M_FOREIGN_TYPES (p2m_to_mask(p2m_map_foreign_rw) | \
+                           p2m_to_mask(p2m_map_foreign_ro))
+
+/* Useful predicates */
+#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
+#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES)
+#define p2m_is_any_ram(_t) (p2m_to_mask(_t) &                   \
+                            (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
+                             P2M_FOREIGN_TYPES))
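+
+/*
+ * For example, p2m_is_ram(p2m_ram_ro) is true, p2m_is_ram(p2m_grant_map_ro)
+ * is false, and p2m_is_any_ram() additionally accepts the grant and
+ * foreign mapping types.
+ */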
+
+/* All common type definitions should live ahead of this inclusion. */
+#ifdef _XEN_P2M_COMMON_H
+# error "xen/p2m-common.h should not be included directly"
+#endif
+#include <xen/p2m-common.h>
+
+static inline
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+    /* Not supported on RISC-V. */
+}
+
+/* Second stage paging setup, to be called on all CPUs */
+void setup_virt_paging(void);
+
+/* Init the data structures for later use by the p2m code */
+int p2m_init(struct domain *d);
+
+/* Return all the p2m resources to Xen. */
+void p2m_teardown(struct domain *d);
+
+/*
+ * Drop the mapping refcount held on each mapped page in the p2m.
+ *
+ * TODO: For the moment only foreign mappings are handled.
+ */
+int relinquish_p2m_mapping(struct domain *d);
+
+/* Context switch */
+void p2m_save_state(struct vcpu *p);
+void p2m_restore_state(struct vcpu *n);
+
+/* Print debugging/statistical info about a domain's p2m */
+void p2m_dump_info(struct domain *d);
+
+static inline void p2m_write_lock(struct p2m_domain *p2m)
+{
+    write_lock(&p2m->lock);
+}
+
+void p2m_write_unlock(struct p2m_domain *p2m);
+
+static inline void p2m_read_lock(struct p2m_domain *p2m)
+{
+    read_lock(&p2m->lock);
+}
+
+static inline void p2m_read_unlock(struct p2m_domain *p2m)
+{
+    read_unlock(&p2m->lock);
+}
+
+static inline int p2m_is_locked(struct p2m_domain *p2m)
+{
+    return rw_is_locked(&p2m->lock);
+}
+
+static inline int p2m_is_write_locked(struct p2m_domain *p2m)
+{
+    return rw_is_write_locked(&p2m->lock);
+}
+
+void p2m_tlb_flush_sync(struct p2m_domain *p2m);
+
+/* Look up the MFN corresponding to a domain's GFN. */
+mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);
+
+/*
+ * Get details of a given gfn.
+ * The P2M lock should be taken by the caller.
+ */
+mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
+                    p2m_type_t *t, p2m_access_t *a,
+                    unsigned int *page_order,
+                    bool *valid);
+
+/*
+ * Direct set a p2m entry: only for use by the P2M code.
+ * The P2M write lock should be taken.
+ */
+int p2m_set_entry(struct p2m_domain *p2m,
+                  gfn_t sgfn,
+                  unsigned long nr,
+                  mfn_t smfn,
+                  p2m_type_t t,
+                  p2m_access_t a);
+
+bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
+
+void p2m_invalidate_root(struct p2m_domain *p2m);
+
+/*
+ * Clean & invalidate caches corresponding to a region [start,end) of guest
+ * address space.
+ *
+ * start will get updated if the function is preempted.
+ */
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
+
+void p2m_set_way_flush(struct vcpu *v);
+
+void p2m_toggle_cache(struct vcpu *v, bool was_enabled);
+
+void p2m_flush_vm(struct vcpu *v);
+
+/*
+ * Map a region in the guest p2m with a specific p2m type.
+ * The memory attributes will be derived from the p2m type.
+ */
+int map_regions_p2mt(struct domain *d,
+                     gfn_t gfn,
+                     unsigned long nr,
+                     mfn_t mfn,
+                     p2m_type_t p2mt);
+
+int unmap_regions_p2mt(struct domain *d,
+                       gfn_t gfn,
+                       unsigned long nr,
+                       mfn_t mfn);
+
+int map_dev_mmio_region(struct domain *d,
+                        gfn_t gfn,
+                        unsigned long nr,
+                        mfn_t mfn);
+
+int guest_physmap_add_entry(struct domain *d,
+                            gfn_t gfn,
+                            mfn_t mfn,
+                            unsigned long page_order,
+                            p2m_type_t t);
+
+/* Untyped version for RAM only, for compatibility */
+static inline int guest_physmap_add_page(struct domain *d,
+                                         gfn_t gfn,
+                                         mfn_t mfn,
+                                         unsigned int page_order)
+{
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
+struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
+                                        p2m_type_t *t);
+
+static inline struct page_info *get_page_from_gfn(
+    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+    mfn_t mfn;
+    p2m_type_t _t;
+    struct page_info *page;
+
+    /*
+     * Special case for DOMID_XEN as it is the only domain so far that is
+     * not auto-translated.
+     */
+    if ( likely(d != dom_xen) )
+        return p2m_get_page_from_gfn(d, _gfn(gfn), t);
+
+    if ( !t )
+        t = &_t;
+
+    *t = p2m_invalid;
+
+    /*
+     * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
+     * page.
+     */
+    mfn = _mfn(gfn);
+    page = mfn_to_page(mfn);
+
+    if ( !mfn_valid(mfn) || !get_page(page, d) )
+        return NULL;
+
+    if ( page->u.inuse.type_info & PGT_writable_page )
+        *t = p2m_ram_rw;
+    else
+        *t = p2m_ram_ro;
+
+    return page;
+}
+
+int get_page_type(struct page_info *page, unsigned long type);
+bool is_iomem_page(mfn_t mfn);
+static inline int get_page_and_type(struct page_info *page,
+                                    struct domain *domain,
+                                    unsigned long type)
+{
+    int rc = get_page(page, domain);
+
+    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
+    {
+        put_page(page);
+        rc = 0;
+    }
+
+    return rc;
+}
+
+/* get host p2m table */
+#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+
+static inline bool p2m_vm_event_sanity_check(struct domain *d)
+{
+    return true;
+}
+
+/*
+ * Return the start of the next mapping based on the order of the
+ * current one.
+ */
+static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
+{
+    /*
+     * The order corresponds to the order of the mapping (or invalid
+     * range) in the page table. So we need to align the GFN before
+     * incrementing.
+     */
+    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
+
+    return gfn_add(gfn, 1UL << order);
+}
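+
+/*
+ * Worked example: for a 2MB mapping (order 9, i.e. 512 4K pages),
+ * gfn_next_boundary(_gfn(0x201), 9) aligns the GFN down to 0x200 and
+ * returns _gfn(0x400), the first GFN of the following 2MB region.
+ */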
+
+static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
+                                        mfn_t mfn)
+{
+    /*
+     * NOTE: If this is implemented then proper reference counting of
+     *       foreign entries will need to be implemented.
+     */
+    return -EOPNOTSUPP;
+}
+
+/*
+ * A vCPU has cache enabled only when the MMU is enabled and data cache
+ * is enabled.
+ */
+static inline bool vcpu_has_cache_enabled(struct vcpu *v)
+{
+    /* Only works with the current vCPU */
+    ASSERT(current == v);
+
+    return 0;
+}
+
+#endif /* _XEN_P2M_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/page.h b/xen/include/asm-riscv/page.h
new file mode 100644
index 0000000000..67c60e8fcd
--- /dev/null
+++ b/xen/include/asm-riscv/page.h
@@ -0,0 +1,327 @@ 
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ * Copyright (C) 2019 Bobby Eshleman <bobbyeshleman@gmail.com>
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+#include <public/xen.h>
+#include <xen/const.h>
+#include <xen/config.h>
+#include <asm/riscv_encoding.h>
+#include <asm/asm.h>
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+
+#define KERN_VIRT_SIZE (-PAGE_OFFSET)
+
+#define PAGE_ENTRIES           512
+#define VPN_BITS               (9)
+#define VPN_MASK               ((unsigned long)((1 << VPN_BITS) - 1))
+
+#ifdef CONFIG_RISCV_64
+/* L3 index Bit[47:39] */
+#define THIRD_SHIFT            (39)
+#define THIRD_MASK             (VPN_MASK << THIRD_SHIFT)
+/* L2 index Bit[38:30] */
+#define SECOND_SHIFT           (30)
+#define SECOND_MASK            (VPN_MASK << SECOND_SHIFT)
+/* L1 index Bit[29:21] */
+#define FIRST_SHIFT            (21)
+#define FIRST_MASK             (VPN_MASK << FIRST_SHIFT)
+/* L0 index Bit[20:12] */
+#define ZEROETH_SHIFT          (12)
+#define ZEROETH_MASK           (VPN_MASK << ZEROETH_SHIFT)
+
+#else /* CONFIG_RISCV_32 */
+
+/* L1 index Bit[31:22] */
+#define FIRST_SHIFT            (22)
+#define FIRST_MASK             (VPN_MASK << FIRST_SHIFT)
+
+/* L0 index Bit[21:12] */
+#define ZEROETH_SHIFT          (12)
+#define ZEROETH_MASK           (VPN_MASK << ZEROETH_SHIFT)
+#endif
+
+#define THIRD_SIZE             (1UL << THIRD_SHIFT)
+#define THIRD_MAP_MASK         (~(THIRD_SIZE - 1))
+#define SECOND_SIZE            (1UL << SECOND_SHIFT)
+#define SECOND_MAP_MASK        (~(SECOND_SIZE - 1))
+#define FIRST_SIZE             (1UL << FIRST_SHIFT)
+#define FIRST_MAP_MASK         (~(FIRST_SIZE - 1))
+#define ZEROETH_SIZE           (1UL << ZEROETH_SHIFT)
+#define ZEROETH_MAP_MASK       (~(ZEROETH_SIZE - 1))
+
+#define PTE_ADDR_MASK          0x003FFFFFFFFFFC00ULL
+#define PTE_SHIFT              10
+#define PTE_RSW_MASK           0x0000000000000300ULL
+#define PTE_RSW_SHIFT          8
+
+#define PTE_USER_SHIFT         4
+#define PTE_PERM_MASK                (PTE_EXECUTABLE | \
+                                      PTE_WRITABLE | \
+                                      PTE_READABLE)
+
+#define PTE_VALID       BIT(0, UL)
+#define PTE_READABLE    BIT(1, UL)
+#define PTE_WRITABLE    BIT(2, UL)
+#define PTE_EXECUTABLE  BIT(3, UL)
+#define PTE_USER        BIT(4, UL)
+#define PTE_GLOBAL      BIT(5, UL)
+#define PTE_ACCESSED    BIT(6, UL)
+#define PTE_DIRTY       BIT(7, UL)
+#define PTE_RSW         (BIT(8, UL) | BIT(9, UL))
+
+#define PTE_LEAF_DEFAULT (PTE_VALID | PTE_READABLE | PTE_WRITABLE | PTE_EXECUTABLE)
+#define PTE_TABLE (PTE_VALID)
+
+/* Calculate the offsets into the pagetables for a given VA */
+#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT)
+#define first_linear_offset(va) ((va) >> FIRST_SHIFT)
+#define second_linear_offset(va) ((va) >> SECOND_SHIFT)
+#define third_linear_offset(va) ((va) >> THIRD_SHIFT)
+
+#define pagetable_zeroeth_index(va) zeroeth_linear_offset((va) & ZEROETH_MASK)
+#define pagetable_first_index(va) first_linear_offset((va) & FIRST_MASK)
+#define pagetable_second_index(va) second_linear_offset((va) & SECOND_MASK)
+#define pagetable_third_index(va) third_linear_offset((va) & THIRD_MASK)
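+
+/*
+ * Worked example (Sv39, three levels in use): for va = 0x3FC0201000,
+ * pagetable_second_index(va) is 0xFF, pagetable_first_index(va) is 0x1 and
+ * pagetable_zeroeth_index(va) is 0x1, i.e. each macro isolates one 9-bit
+ * VPN field and shifts it down to an index into a 512-entry table.
+ */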
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_UP(addr)	(((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
+#define PAGE_DOWN(addr)	((addr)&(~((PAGE_SIZE)-1)))
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr, size)	(((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr, size)	((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)
+
+#define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
+#define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+			memcpy((vto), (vfrom), PAGE_SIZE)
+
+/* Attribute indexes. */
+#define MT_NORMAL        0x0
+
+#define _PAGE_XN_BIT    3
+#define _PAGE_RO_BIT    4
+#define _PAGE_XN    (1U << _PAGE_XN_BIT)
+#define _PAGE_RO    (1U << _PAGE_RO_BIT)
+#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U)
+#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U)
+
+/*
+ * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not
+ * meant to be used outside of this header.
+ */
+#define _PAGE_DEVICE    _PAGE_XN
+#define _PAGE_NORMAL    MT_NORMAL
+
+#define PAGE_HYPERVISOR_RO      (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN)
+#define PAGE_HYPERVISOR_RX      (_PAGE_NORMAL|_PAGE_RO)
+#define PAGE_HYPERVISOR_RW      (_PAGE_NORMAL|_PAGE_XN)
+
+#define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
+#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE)
+#define PAGE_HYPERVISOR_WC      (_PAGE_DEVICE)
+
+/*
+ * Synchronise the hart's instruction fetches with prior data stores:
+ * fence.i is the RISC-V analogue of an instruction cache invalidation.
+ */
+static inline void invalidate_icache(void)
+{
+    asm volatile ("fence.i" ::: "memory");
+}
+
+static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+{
+    /* TODO */
+    return 0;
+}
+
+static inline int clean_dcache_va_range(const void *p, unsigned long size)
+{
+    /* TODO */
+    return 0;
+}
+
+static inline int clean_and_invalidate_dcache_va_range
+    (const void *p, unsigned long size)
+{
+    /* TODO */
+    return 0;
+}
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+    unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+    uint64_t pte;
+} pte_t;
+
+typedef struct {
+    unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x)	((x).pte)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+static inline bool pte_is_table(pte_t *p)
+{
+    return (((p->pte) & (PTE_VALID
+                        | PTE_READABLE
+                        | PTE_WRITABLE
+                        | PTE_EXECUTABLE)) == PTE_VALID);
+}
+
+static inline bool pte_is_valid(pte_t *p)
+{
+    return p->pte & PTE_VALID;
+}
+
+static inline bool pte_is_leaf(pte_t *p)
+{
+    return (p->pte & (PTE_WRITABLE | PTE_READABLE | PTE_EXECUTABLE));
+}
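+
+/*
+ * Example: a PTE with only PTE_VALID set (R = W = X = 0) points to a
+ * next-level table, so pte_is_table() is true and pte_is_leaf() is false;
+ * any PTE with at least one of R/W/X set is a leaf mapping instead.
+ */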
+
+/*
+ * Shift the VPN[x] or PPN[x] fields of a virtual or physical address
+ * to become the shifted PPN[x] fields of a page table entry.
+ */
+#define addr_to_ppn(x) (((x) >> PAGE_SHIFT) << PTE_SHIFT)
+
+static inline pte_t paddr_to_pte(unsigned long paddr)
+{
+    return (pte_t) { .pte = addr_to_ppn(paddr) };
+}
+
+static inline paddr_t pte_to_paddr(pte_t *p)
+{
+    return (paddr_t)((p->pte >> PTE_SHIFT) << PAGE_SHIFT);
+}
+
+#define pte_get_mfn(pte_)      _mfn(((pte_).pte) >> PTE_SHIFT)
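+
+/*
+ * Worked example: paddr_to_pte(0x80200000) encodes PPN 0x80200 as
+ * 0x20080000 (no permission bits set), pte_to_paddr() on that entry
+ * recovers 0x80200000, and callers OR in PTE_LEAF_DEFAULT or PTE_TABLE
+ * to make the entry valid.
+ */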
+
+#define MEGAPAGE_ALIGN(x) ((x) & FIRST_MAP_MASK)
+#define GIGAPAGE_ALIGN(x) ((x) & SECOND_MAP_MASK)
+
+#define paddr_to_megapage_ppn(x) addr_to_ppn(MEGAPAGE_ALIGN(x))
+#define paddr_to_gigapage_ppn(x) addr_to_ppn(GIGAPAGE_ALIGN(x))
+
+#define __pte(x)	((pte_t) { (x) })
+#define __pgd(x)	((pgd_t) { (x) })
+#define __pgprot(x)	((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+extern unsigned long va_pa_offset;
+extern unsigned long pfn_base;
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+#define __pa(x)		((unsigned long)(x) - va_pa_offset)
+#define __va(x)		((void *)((unsigned long) (x) + va_pa_offset))
+
+#define pfn_valid(pfn) \
+	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+
+#define ARCH_PFN_OFFSET		(pfn_base)
+
+#endif /* __ASSEMBLY__ */
+
+#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* Flush the dcache for an entire page. */
+void flush_page_to_ram(unsigned long mfn, bool sync_icache);
+
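+/*
+ * Fetch the (possibly compressed) instruction at a virtual address, with
+ * MPRV/MXR set so the loads are translated and permission-checked as the
+ * trapping context; the halfword path copes with RVC encodings and 2-byte
+ * alignment.
+ */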
+static inline uint64_t va_to_par(vaddr_t va)
+{
+    register unsigned long __mepc asm ("a2") = va;
+    register unsigned long __mstatus asm ("a3");
+    register unsigned long __bsstatus asm ("a4");
+    unsigned long val;
+    unsigned long rvc_mask = 3, tmp;
+    asm ("csrrs %[mstatus], "STR(CSR_MSTATUS)", %[mprv]\n"
+        "csrrs %[bsstatus], "STR(CSR_BSSTATUS)", %[smxr]\n"
+        "and %[tmp], %[addr], 2\n"
+        "bnez %[tmp], 1f\n"
+#ifdef CONFIG_RISCV_64
+        STR(LWU) " %[insn], (%[addr])\n"
+#else
+        STR(LW) " %[insn], (%[addr])\n"
+#endif
+        "and %[tmp], %[insn], %[rvc_mask]\n"
+        "beq %[tmp], %[rvc_mask], 2f\n"
+        "sll %[insn], %[insn], %[xlen_minus_16]\n"
+        "srl %[insn], %[insn], %[xlen_minus_16]\n"
+        "j 2f\n"
+        "1:\n"
+        "lhu %[insn], (%[addr])\n"
+        "and %[tmp], %[insn], %[rvc_mask]\n"
+        "bne %[tmp], %[rvc_mask], 2f\n"
+        "lhu %[tmp], 2(%[addr])\n"
+        "sll %[tmp], %[tmp], 16\n"
+        "add %[insn], %[insn], %[tmp]\n"
+        "2: csrw "STR(CSR_BSSTATUS)", %[bsstatus]\n"
+        "csrw "STR(CSR_MSTATUS)", %[mstatus]"
+    : [mstatus] "+&r" (__mstatus), [bsstatus] "+&r" (__bsstatus),
+      [insn] "=&r" (val), [tmp] "=&r" (tmp)
+    : [mprv] "r" (MSTATUS_MPRV | SSTATUS_MXR), [smxr] "r" (SSTATUS_MXR),
+      [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
+      [xlen_minus_16] "i" (__riscv_xlen - 16));
+
+    return val;
+}
+
+/* Write a pagetable entry; the fences order the update against translation. */
+static inline void write_pte(pte_t *p, pte_t pte)
+{
+    asm volatile ("sfence.vma");
+    *p = pte;
+    asm volatile ("sfence.vma");
+}
+
+#endif /* _ASM_RISCV_PAGE_H */
diff --git a/xen/include/asm-riscv/paging.h b/xen/include/asm-riscv/paging.h
new file mode 100644
index 0000000000..6d1a000246
--- /dev/null
+++ b/xen/include/asm-riscv/paging.h
@@ -0,0 +1,16 @@ 
+#ifndef _XEN_PAGING_H
+#define _XEN_PAGING_H
+
+#define paging_mode_translate(d)              (1)
+#define paging_mode_external(d)               (1)
+
+#endif /* _XEN_PAGING_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/pci.h b/xen/include/asm-riscv/pci.h
new file mode 100644
index 0000000000..0ccf335e34
--- /dev/null
+++ b/xen/include/asm-riscv/pci.h
@@ -0,0 +1,31 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RISCV_PCI_H__
+#define __RISCV_PCI_H__
+
+struct arch_pci_dev {
+};
+
+#endif /* __RISCV_PCI_H__ */
diff --git a/xen/include/asm-riscv/percpu.h b/xen/include/asm-riscv/percpu.h
new file mode 100644
index 0000000000..9bf4ffcae3
--- /dev/null
+++ b/xen/include/asm-riscv/percpu.h
@@ -0,0 +1,34 @@ 
+#ifndef __RISCV_PERCPU_H__
+#define __RISCV_PERCPU_H__
+
+#ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+#include <asm/csr.h>
+#include <asm/sysregs.h>
+
+extern char __per_cpu_start[], __per_cpu_data_end[];
+extern unsigned long __per_cpu_offset[NR_CPUS];
+void percpu_init_areas(void);
+
+#define per_cpu(var, cpu)  \
+    (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define this_cpu(var) \
+    (*RELOC_HIDE(&per_cpu__##var, csr_read(sscratch)))
+
+#define per_cpu_ptr(var, cpu)  \
+    (*RELOC_HIDE(var, __per_cpu_offset[cpu]))
+#define this_cpu_ptr(var) \
+    (*RELOC_HIDE(var, csr_read(sscratch)))
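+
+/*
+ * Usage sketch (assuming the generic DEFINE_PER_CPU/DECLARE_PER_CPU
+ * wrappers): a variable defined with DEFINE_PER_CPU(unsigned long, foo)
+ * is read locally as this_cpu(foo) and remotely as per_cpu(foo, cpu);
+ * sscratch is assumed to hold the running CPU's per-cpu offset.
+ */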
+
+#endif
+
+#endif /* __RISCV_PERCPU_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/pgtable-bits.h b/xen/include/asm-riscv/pgtable-bits.h
new file mode 100644
index 0000000000..f8a4ded2bb
--- /dev/null
+++ b/xen/include/asm-riscv/pgtable-bits.h
@@ -0,0 +1,53 @@ 
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_BITS_H
+#define _ASM_RISCV_PGTABLE_BITS_H
+
+/*
+ * PTE format:
+ * | XLEN-1  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ *       PFN      reserved for SW   D   A   G   U   X   W   R   V
+ */
+
+#define _PAGE_ACCESSED_OFFSET 6
+
+#define _PAGE_PRESENT   (1 << 0)
+#define _PAGE_READ      (1 << 1)    /* Readable */
+#define _PAGE_WRITE     (1 << 2)    /* Writable */
+#define _PAGE_EXEC      (1 << 3)    /* Executable */
+#define _PAGE_USER      (1 << 4)    /* User */
+#define _PAGE_GLOBAL    (1 << 5)    /* Global */
+#define _PAGE_ACCESSED  (1 << 6)    /* Set by hardware on any access */
+#define _PAGE_DIRTY     (1 << 7)    /* Set by hardware on any write */
+#define _PAGE_SOFT      (1 << 8)    /* Reserved for software */
+
+#define _PAGE_SPECIAL   _PAGE_SOFT
+#define _PAGE_TABLE     _PAGE_PRESENT
+
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
+#define _PAGE_PFN_SHIFT 10
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK  (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ |	\
+					  _PAGE_WRITE | _PAGE_EXEC |	\
+					  _PAGE_USER | _PAGE_GLOBAL))
+
+#define PAGE_AI_MASK(x) ((x) & _PAGE_CHG_MASK)
+
+#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/xen/include/asm-riscv/processor.h b/xen/include/asm-riscv/processor.h
new file mode 100644
index 0000000000..194e81a62f
--- /dev/null
+++ b/xen/include/asm-riscv/processor.h
@@ -0,0 +1,60 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/* On stack VCPU state */
+struct cpu_user_regs
+{
+	unsigned long r0;
+};
+
+void show_execution_state(const struct cpu_user_regs *regs);
+void show_registers(const struct cpu_user_regs *regs);
+
+/* All a bit UP for the moment */
+#define cpu_to_core(_cpu)   (0)
+#define cpu_to_socket(_cpu) (0)
+
+/* Based on Linux: arch/riscv/include/asm/processor.h */
+
+static inline void cpu_relax(void)
+{
+	int dummy;
+	/* In lieu of a halt instruction, induce a long-latency stall. */
+	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+	barrier();
+}
+
+static inline void wait_for_interrupt(void)
+{
+	__asm__ __volatile__ ("wfi");
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/xen/include/asm-riscv/random.h b/xen/include/asm-riscv/random.h
new file mode 100644
index 0000000000..b4acee276b
--- /dev/null
+++ b/xen/include/asm-riscv/random.h
@@ -0,0 +1,9 @@ 
+#ifndef __ASM_RANDOM_H__
+#define __ASM_RANDOM_H__
+
+static inline unsigned int arch_get_random(void)
+{
+    return 0;
+}
+
+#endif /* __ASM_RANDOM_H__ */
diff --git a/xen/include/asm-riscv/regs.h b/xen/include/asm-riscv/regs.h
new file mode 100644
index 0000000000..103cf1e91e
--- /dev/null
+++ b/xen/include/asm-riscv/regs.h
@@ -0,0 +1,42 @@ 
+#ifndef __RISCV_REGS_H__
+#define __RISCV_REGS_H__
+
+#define PSR_MODE_MASK 0x1f
+
+#ifndef __ASSEMBLY__
+
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+
+#define hyp_mode(r)     (0)
+
+static inline bool guest_mode(const struct cpu_user_regs *r)
+{
+    unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r);
+    /* Frame pointer must point into current CPU stack. */
+    ASSERT(diff < STACK_SIZE);
+    /* If not a guest frame, it must be a hypervisor frame. */
+    ASSERT((diff == 0) || hyp_mode(r));
+    /* Return TRUE if it's a guest frame. */
+    return (diff == 0);
+}
+
+#define return_reg(v) ((v)->arch.cpu_info->guest_cpu_user_regs.r0)
+
+register_t get_user_reg(struct cpu_user_regs *regs, int reg);
+void set_user_reg(struct cpu_user_regs *regs, int reg, register_t val);
+
+#endif
+
+#endif /* __RISCV_REGS_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/riscv_encoding.h b/xen/include/asm-riscv/riscv_encoding.h
new file mode 100644
index 0000000000..c1e022cd96
--- /dev/null
+++ b/xen/include/asm-riscv/riscv_encoding.h
@@ -0,0 +1,682 @@ 
+/**
+ * Copyright (c) 2018 Anup Patel.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * @file riscv_encoding.h
+ * @author Anup Patel (anup@brainfault.org)
+ * @brief CSR macros & defines shared by all C & assembly code
+ *
+ * The source has been largely adapted from OpenSBI 0.3 or higher:
+ * include/sbi/riscv_encoding.h
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *   Anup Patel <anup.patel@wdc.com>
+ *
+ * The original code is licensed under the BSD 2-clause license.
+ */
+
+#ifndef __RISCV_ENCODING_H__
+#define __RISCV_ENCODING_H__
+
+#define MSTATUS_UIE			0x00000001
+#define MSTATUS_SIE			0x00000002
+#define MSTATUS_HIE			0x00000004
+#define MSTATUS_MIE			0x00000008
+#define MSTATUS_UPIE			0x00000010
+#define MSTATUS_SPIE_SHIFT		5
+#define MSTATUS_SPIE			(1UL << MSTATUS_SPIE_SHIFT)
+#define MSTATUS_HPIE			0x00000040
+#define MSTATUS_MPIE			0x00000080
+#define MSTATUS_SPP_SHIFT		8
+#define MSTATUS_SPP			(1UL << MSTATUS_SPP_SHIFT)
+#define MSTATUS_HPP			0x00000600
+#define MSTATUS_MPP_SHIFT		11
+#define MSTATUS_MPP			(3UL << MSTATUS_MPP_SHIFT)
+#define MSTATUS_FS			0x00006000
+#define MSTATUS_XS			0x00018000
+#define MSTATUS_MPRV			0x00020000
+#define MSTATUS_SUM			0x00040000
+#define MSTATUS_MXR			0x00080000
+#define MSTATUS_TVM			0x00100000
+#define MSTATUS_TW			0x00200000
+#define MSTATUS_TSR			0x00400000
+#define MSTATUS32_SD			0x80000000
+#define MSTATUS_UXL			0x0000000300000000
+#define MSTATUS_SXL			0x0000000C00000000
+#define MSTATUS64_SD			0x8000000000000000
+
+/* Status register flags */
+#define SSTATUS_UIE			_AC(0x00000001, UL)
+#define SSTATUS_SIE			_AC(0x00000002, UL)
+#define SSTATUS_UPIE			_AC(0x00000010, UL)
+#define SSTATUS_SPIE			_AC(0x00000020, UL)
+#define SSTATUS_SPP			_AC(0x00000100, UL)
+#define SSTATUS_SUM			_AC(0x00040000, UL)
+#define SSTATUS_MXR			_AC(0x00080000, UL)
+
+#define SSTATUS_FS			_AC(0x00006000, UL)
+#define SSTATUS_FS_OFF			_AC(0x00000000, UL)
+#define SSTATUS_FS_INITIAL		_AC(0x00002000, UL)
+#define SSTATUS_FS_CLEAN		_AC(0x00004000, UL)
+#define SSTATUS_FS_DIRTY		_AC(0x00006000, UL)
+
+#define SSTATUS_XS			_AC(0x00018000, UL)
+#define SSTATUS_XS_OFF			_AC(0x00000000, UL)
+#define SSTATUS_XS_INITIAL		_AC(0x00008000, UL)
+#define SSTATUS_XS_CLEAN		_AC(0x00010000, UL)
+#define SSTATUS_XS_DIRTY		_AC(0x00018000, UL)
+
+#define SSTATUS32_SD			_AC(0x80000000, UL)
+#define SSTATUS64_SD			_AC(0x8000000000000000, UL)
+
+#define SSTATUS64_UXL			_AC(0x0000000300000000, UL)
+
+#ifdef CONFIG_64BIT
+#define MSTATUS_SD			MSTATUS64_SD
+#define SSTATUS_SD			SSTATUS64_SD
+#else
+#define MSTATUS_SD			MSTATUS32_SD
+#define SSTATUS_SD			SSTATUS32_SD
+#endif
+
+#define HSTATUS_VTSR			_AC(0x00400000, UL)
+#define HSTATUS_VTVM			_AC(0x00100000, UL)
+#define HSTATUS_SP2V			_AC(0x00000200, UL)
+#define HSTATUS_SP2P			_AC(0x00000100, UL)
+#define HSTATUS_SPV			_AC(0x00000080, UL)
+#define HSTATUS_STL			_AC(0x00000040, UL)
+#define HSTATUS_SPRV			_AC(0x00000001, UL)
+
+#define DCSR_XDEBUGVER			(3U<<30)
+#define DCSR_NDRESET			(1<<29)
+#define DCSR_FULLRESET			(1<<28)
+#define DCSR_EBREAKM			(1<<15)
+#define DCSR_EBREAKH			(1<<14)
+#define DCSR_EBREAKS			(1<<13)
+#define DCSR_EBREAKU			(1<<12)
+#define DCSR_STOPCYCLE			(1<<10)
+#define DCSR_STOPTIME			(1<<9)
+#define DCSR_CAUSE			(7<<6)
+#define DCSR_DEBUGINT			(1<<5)
+#define DCSR_HALT			(1<<3)
+#define DCSR_STEP			(1<<2)
+#define DCSR_PRV			(3<<0)
+
+#define DCSR_CAUSE_NONE			0
+#define DCSR_CAUSE_SWBP			1
+#define DCSR_CAUSE_HWBP			2
+#define DCSR_CAUSE_DEBUGINT		3
+#define DCSR_CAUSE_STEP			4
+#define DCSR_CAUSE_HALT			5
+
+#define MCONTROL_TYPE(xlen)		(0xfULL<<((xlen)-4))
+#define MCONTROL_DMODE(xlen)		(1ULL<<((xlen)-5))
+#define MCONTROL_MASKMAX(xlen)		(0x3fULL<<((xlen)-11))
+
+#define MCONTROL_SELECT			(1<<19)
+#define MCONTROL_TIMING			(1<<18)
+#define MCONTROL_ACTION			(0x3f<<12)
+#define MCONTROL_CHAIN			(1<<11)
+#define MCONTROL_MATCH			(0xf<<7)
+#define MCONTROL_M			(1<<6)
+#define MCONTROL_H			(1<<5)
+#define MCONTROL_S			(1<<4)
+#define MCONTROL_U			(1<<3)
+#define MCONTROL_EXECUTE		(1<<2)
+#define MCONTROL_STORE			(1<<1)
+#define MCONTROL_LOAD			(1<<0)
+
+#define MCONTROL_TYPE_NONE		0
+#define MCONTROL_TYPE_MATCH		2
+
+#define MCONTROL_ACTION_DEBUG_EXCEPTION	0
+#define MCONTROL_ACTION_DEBUG_MODE	1
+#define MCONTROL_ACTION_TRACE_START	2
+#define MCONTROL_ACTION_TRACE_STOP	3
+#define MCONTROL_ACTION_TRACE_EMIT	4
+
+#define MCONTROL_MATCH_EQUAL		0
+#define MCONTROL_MATCH_NAPOT		1
+#define MCONTROL_MATCH_GE		2
+#define MCONTROL_MATCH_LT		3
+#define MCONTROL_MATCH_MASK_LOW		4
+#define MCONTROL_MATCH_MASK_HIGH	5
+
+#define IRQ_S_SOFT			1
+#define IRQ_H_SOFT			2
+#define IRQ_M_SOFT			3
+#define IRQ_S_TIMER			5
+#define IRQ_H_TIMER			6
+#define IRQ_M_TIMER			7
+#define IRQ_S_EXT			9
+#define IRQ_H_EXT			10
+#define IRQ_M_EXT			11
+#define IRQ_COP				12
+#define IRQ_HOST			13
+
+#define MIP_SSIP			(1 << IRQ_S_SOFT)
+#define MIP_HSIP			(1 << IRQ_H_SOFT)
+#define MIP_MSIP			(1 << IRQ_M_SOFT)
+#define MIP_STIP			(1 << IRQ_S_TIMER)
+#define MIP_HTIP			(1 << IRQ_H_TIMER)
+#define MIP_MTIP			(1 << IRQ_M_TIMER)
+#define MIP_SEIP			(1 << IRQ_S_EXT)
+#define MIP_HEIP			(1 << IRQ_H_EXT)
+#define MIP_MEIP			(1 << IRQ_M_EXT)
+
+#define SIP_SSIP			MIP_SSIP
+#define SIP_STIP			MIP_STIP
+#define SIP_SEIP			MIP_SEIP
+
+/* Interrupt Enable and Interrupt Pending flags */
+#define SIE_SSIE			MIP_SSIP
+#define SIE_STIE			MIP_STIP
+#define SIE_SEIE			MIP_SEIP
+
+#define PRV_U				0
+#define PRV_S				1
+#define PRV_H				2
+#define PRV_M				3
+
+#define SATP32_MODE			_AC(0x80000000, UL)
+#define SATP32_MODE_SHIFT		31
+#define SATP32_ASID			_AC(0x7FC00000, UL)
+#define SATP32_ASID_SHIFT		22
+#define SATP32_PPN			_AC(0x003FFFFF, UL)
+
+#define SATP64_MODE			_AC(0xF000000000000000, UL)
+#define SATP64_MODE_SHIFT		60
+#define SATP64_ASID			_AC(0x0FFFF00000000000, UL)
+#define SATP64_ASID_SHIFT		44
+#define SATP64_PPN			_AC(0x00000FFFFFFFFFFF, UL)
+
+#define SATP_MODE_OFF			_AC(0, UL)
+#define SATP_MODE_SV32			_AC(1, UL)
+#define SATP_MODE_SV39			_AC(8, UL)
+#define SATP_MODE_SV48			_AC(9, UL)
+#define SATP_MODE_SV57			_AC(10, UL)
+#define SATP_MODE_SV64			_AC(11, UL)
+
+#define HGATP_MODE_OFF			_AC(0, UL)
+#define HGATP_MODE_SV32X4		_AC(1, UL)
+#define HGATP_MODE_SV39X4		_AC(8, UL)
+#define HGATP_MODE_SV48X4		_AC(9, UL)
+
+#define HGATP32_MODE_SHIFT		31
+#define HGATP32_VMID_SHIFT		22
+#define HGATP32_VMID_MASK		_AC(0x1FC00000, UL)
+#define HGATP32_PPN			_AC(0x003FFFFF, UL)
+
+#define HGATP64_MODE_SHIFT		60
+#define HGATP64_VMID_SHIFT		44
+#define HGATP64_VMID_MASK		_AC(0x03FFF00000000000, UL)
+#define HGATP64_PPN			_AC(0x00000FFFFFFFFFFF, UL)
+
+/* SATP flags */
+#ifdef CONFIG_64BIT
+#define SATP_PPN			_AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_ASID_SHIFT			SATP64_ASID_SHIFT
+#define SATP_ASID_MASK			SATP64_ASID
+#define SATP_MODE			(SATP_MODE_SV39 << SATP64_MODE_SHIFT)
+#define SATP_MODE_SHIFT			SATP64_MODE_SHIFT
+
+#define HGATP_PPN			HGATP64_PPN
+#define HGATP_VMID_SHIFT		HGATP64_VMID_SHIFT
+#define HGATP_VMID_MASK			HGATP64_VMID_MASK
+#define HGATP_MODE			(HGATP_MODE_SV39X4 << HGATP64_MODE_SHIFT)
+#else
+#define SATP_PPN			SATP32_PPN
+#define SATP_ASID_SHIFT			SATP32_ASID_SHIFT
+#define SATP_ASID_MASK			SATP32_ASID
+#define SATP_MODE			(SATP_MODE_SV32 << SATP32_MODE_SHIFT)
+#define SATP_MODE_SHIFT			SATP32_MODE_SHIFT
+
+#define HGATP_PPN			HGATP32_PPN
+#define HGATP_VMID_SHIFT		HGATP32_VMID_SHIFT
+#define HGATP_VMID_MASK			HGATP32_VMID_MASK
+#define HGATP_MODE			(HGATP_MODE_SV32X4 << HGATP32_MODE_SHIFT)
+#endif
+
+/* SCAUSE */
+#ifdef CONFIG_64BIT
+#define SCAUSE_INTERRUPT_MASK   _AC(0x8000000000000000, UL)
+#define SCAUSE_EXC_MASK		_AC(0x7FFFFFFFFFFFFFFF, UL)
+#else
+#define SCAUSE_INTERRUPT_MASK   _AC(0x80000000, UL)
+#define SCAUSE_EXC_MASK		_AC(0x7FFFFFFF, UL)
+#endif
+
+#define PMP_R				0x01
+#define PMP_W				0x02
+#define PMP_X				0x04
+#define PMP_A				0x18
+#define PMP_A_TOR			0x08
+#define PMP_A_NA4			0x10
+#define PMP_A_NAPOT			0x18
+#define PMP_L				0x80
+
+#define PMP_SHIFT			2
+#define PMP_COUNT			16
+
+/* page table entry (PTE) fields */
+#define PTE_V				0x001 /* Valid */
+#define PTE_R				0x002 /* Read */
+#define PTE_W				0x004 /* Write */
+#define PTE_X				0x008 /* Execute */
+#define PTE_U				0x010 /* User */
+#define PTE_G				0x020 /* Global */
+#define PTE_A				0x040 /* Accessed */
+#define PTE_D				0x080 /* Dirty */
+#define PTE_SOFT			0x300 /* Reserved for Software */
+
+#define PTE_PPN_SHIFT			10
+
+#ifdef CONFIG_64BIT
+#define PTE_LEVEL_BITS			9
+#else
+#define PTE_LEVEL_BITS			10
+#endif
+
+#define CSR_USTATUS			0x0
+#define CSR_FFLAGS			0x1
+#define CSR_FRM				0x2
+#define CSR_FCSR			0x3
+#define CSR_CYCLE			0xc00
+#define CSR_UIE				0x4
+#define CSR_UTVEC			0x5
+#define CSR_USCRATCH			0x40
+#define CSR_UEPC			0x41
+#define CSR_UCAUSE			0x42
+#define CSR_UTVAL			0x43
+#define CSR_UIP				0x44
+#define CSR_TIME			0xc01
+#define CSR_INSTRET			0xc02
+#define CSR_HPMCOUNTER3			0xc03
+#define CSR_HPMCOUNTER4			0xc04
+#define CSR_HPMCOUNTER5			0xc05
+#define CSR_HPMCOUNTER6			0xc06
+#define CSR_HPMCOUNTER7			0xc07
+#define CSR_HPMCOUNTER8			0xc08
+#define CSR_HPMCOUNTER9			0xc09
+#define CSR_HPMCOUNTER10		0xc0a
+#define CSR_HPMCOUNTER11		0xc0b
+#define CSR_HPMCOUNTER12		0xc0c
+#define CSR_HPMCOUNTER13		0xc0d
+#define CSR_HPMCOUNTER14		0xc0e
+#define CSR_HPMCOUNTER15		0xc0f
+#define CSR_HPMCOUNTER16		0xc10
+#define CSR_HPMCOUNTER17		0xc11
+#define CSR_HPMCOUNTER18		0xc12
+#define CSR_HPMCOUNTER19		0xc13
+#define CSR_HPMCOUNTER20		0xc14
+#define CSR_HPMCOUNTER21		0xc15
+#define CSR_HPMCOUNTER22		0xc16
+#define CSR_HPMCOUNTER23		0xc17
+#define CSR_HPMCOUNTER24		0xc18
+#define CSR_HPMCOUNTER25		0xc19
+#define CSR_HPMCOUNTER26		0xc1a
+#define CSR_HPMCOUNTER27		0xc1b
+#define CSR_HPMCOUNTER28		0xc1c
+#define CSR_HPMCOUNTER29		0xc1d
+#define CSR_HPMCOUNTER30		0xc1e
+#define CSR_HPMCOUNTER31		0xc1f
+#define CSR_SSTATUS			0x100
+#define CSR_SIE				0x104
+#define CSR_STVEC			0x105
+#define CSR_SCOUNTEREN			0x106
+#define CSR_SSCRATCH			0x140
+#define CSR_SEPC			0x141
+#define CSR_SCAUSE			0x142
+#define CSR_STVAL			0x143
+#define CSR_SIP				0x144
+#define CSR_SATP			0x180
+
+#define CSR_BSSTATUS			0x200
+#define CSR_BSIE			0x204
+#define CSR_BSTVEC			0x205
+#define CSR_BSSCRATCH			0x240
+#define CSR_BSEPC			0x241
+#define CSR_BSCAUSE			0x242
+#define CSR_BSTVAL			0x243
+#define CSR_BSIP			0x244
+#define CSR_BSATP			0x280
+
+#define CSR_MSTATUS			0x300
+#define CSR_MISA			0x301
+#define CSR_MEDELEG			0x302
+#define CSR_MIDELEG			0x303
+#define CSR_MIE				0x304
+#define CSR_MTVEC			0x305
+#define CSR_MCOUNTEREN			0x306
+#define CSR_MSCRATCH			0x340
+#define CSR_MEPC			0x341
+#define CSR_MCAUSE			0x342
+#define CSR_MTVAL			0x343
+#define CSR_MIP				0x344
+#define CSR_PMPCFG0			0x3a0
+#define CSR_PMPCFG1			0x3a1
+#define CSR_PMPCFG2			0x3a2
+#define CSR_PMPCFG3			0x3a3
+#define CSR_PMPADDR0			0x3b0
+#define CSR_PMPADDR1			0x3b1
+#define CSR_PMPADDR2			0x3b2
+#define CSR_PMPADDR3			0x3b3
+#define CSR_PMPADDR4			0x3b4
+#define CSR_PMPADDR5			0x3b5
+#define CSR_PMPADDR6			0x3b6
+#define CSR_PMPADDR7			0x3b7
+#define CSR_PMPADDR8			0x3b8
+#define CSR_PMPADDR9			0x3b9
+#define CSR_PMPADDR10			0x3ba
+#define CSR_PMPADDR11			0x3bb
+#define CSR_PMPADDR12			0x3bc
+#define CSR_PMPADDR13			0x3bd
+#define CSR_PMPADDR14			0x3be
+#define CSR_PMPADDR15			0x3bf
+#define CSR_TSELECT			0x7a0
+#define CSR_TDATA1			0x7a1
+#define CSR_TDATA2			0x7a2
+#define CSR_TDATA3			0x7a3
+#define CSR_DCSR			0x7b0
+#define CSR_DPC				0x7b1
+#define CSR_DSCRATCH			0x7b2
+
+#define CSR_HSTATUS			0xa00
+#define CSR_HEDELEG			0xa02
+#define CSR_HIDELEG			0xa03
+#define CSR_HGATP			0xa80
+
+#define CSR_MCYCLE			0xb00
+#define CSR_MINSTRET			0xb02
+#define CSR_MHPMCOUNTER3		0xb03
+#define CSR_MHPMCOUNTER4		0xb04
+#define CSR_MHPMCOUNTER5		0xb05
+#define CSR_MHPMCOUNTER6		0xb06
+#define CSR_MHPMCOUNTER7		0xb07
+#define CSR_MHPMCOUNTER8		0xb08
+#define CSR_MHPMCOUNTER9		0xb09
+#define CSR_MHPMCOUNTER10		0xb0a
+#define CSR_MHPMCOUNTER11		0xb0b
+#define CSR_MHPMCOUNTER12		0xb0c
+#define CSR_MHPMCOUNTER13		0xb0d
+#define CSR_MHPMCOUNTER14		0xb0e
+#define CSR_MHPMCOUNTER15		0xb0f
+#define CSR_MHPMCOUNTER16		0xb10
+#define CSR_MHPMCOUNTER17		0xb11
+#define CSR_MHPMCOUNTER18		0xb12
+#define CSR_MHPMCOUNTER19		0xb13
+#define CSR_MHPMCOUNTER20		0xb14
+#define CSR_MHPMCOUNTER21		0xb15
+#define CSR_MHPMCOUNTER22		0xb16
+#define CSR_MHPMCOUNTER23		0xb17
+#define CSR_MHPMCOUNTER24		0xb18
+#define CSR_MHPMCOUNTER25		0xb19
+#define CSR_MHPMCOUNTER26		0xb1a
+#define CSR_MHPMCOUNTER27		0xb1b
+#define CSR_MHPMCOUNTER28		0xb1c
+#define CSR_MHPMCOUNTER29		0xb1d
+#define CSR_MHPMCOUNTER30		0xb1e
+#define CSR_MHPMCOUNTER31		0xb1f
+#define CSR_MHPMEVENT3			0x323
+#define CSR_MHPMEVENT4			0x324
+#define CSR_MHPMEVENT5			0x325
+#define CSR_MHPMEVENT6			0x326
+#define CSR_MHPMEVENT7			0x327
+#define CSR_MHPMEVENT8			0x328
+#define CSR_MHPMEVENT9			0x329
+#define CSR_MHPMEVENT10			0x32a
+#define CSR_MHPMEVENT11			0x32b
+#define CSR_MHPMEVENT12			0x32c
+#define CSR_MHPMEVENT13			0x32d
+#define CSR_MHPMEVENT14			0x32e
+#define CSR_MHPMEVENT15			0x32f
+#define CSR_MHPMEVENT16			0x330
+#define CSR_MHPMEVENT17			0x331
+#define CSR_MHPMEVENT18			0x332
+#define CSR_MHPMEVENT19			0x333
+#define CSR_MHPMEVENT20			0x334
+#define CSR_MHPMEVENT21			0x335
+#define CSR_MHPMEVENT22			0x336
+#define CSR_MHPMEVENT23			0x337
+#define CSR_MHPMEVENT24			0x338
+#define CSR_MHPMEVENT25			0x339
+#define CSR_MHPMEVENT26			0x33a
+#define CSR_MHPMEVENT27			0x33b
+#define CSR_MHPMEVENT28			0x33c
+#define CSR_MHPMEVENT29			0x33d
+#define CSR_MHPMEVENT30			0x33e
+#define CSR_MHPMEVENT31			0x33f
+#define CSR_MVENDORID			0xf11
+#define CSR_MARCHID			0xf12
+#define CSR_MIMPID			0xf13
+#define CSR_MHARTID			0xf14
+#define CSR_CYCLEH			0xc80
+#define CSR_TIMEH			0xc81
+#define CSR_INSTRETH			0xc82
+#define CSR_HPMCOUNTER3H		0xc83
+#define CSR_HPMCOUNTER4H		0xc84
+#define CSR_HPMCOUNTER5H		0xc85
+#define CSR_HPMCOUNTER6H		0xc86
+#define CSR_HPMCOUNTER7H		0xc87
+#define CSR_HPMCOUNTER8H		0xc88
+#define CSR_HPMCOUNTER9H		0xc89
+#define CSR_HPMCOUNTER10H		0xc8a
+#define CSR_HPMCOUNTER11H		0xc8b
+#define CSR_HPMCOUNTER12H		0xc8c
+#define CSR_HPMCOUNTER13H		0xc8d
+#define CSR_HPMCOUNTER14H		0xc8e
+#define CSR_HPMCOUNTER15H		0xc8f
+#define CSR_HPMCOUNTER16H		0xc90
+#define CSR_HPMCOUNTER17H		0xc91
+#define CSR_HPMCOUNTER18H		0xc92
+#define CSR_HPMCOUNTER19H		0xc93
+#define CSR_HPMCOUNTER20H		0xc94
+#define CSR_HPMCOUNTER21H		0xc95
+#define CSR_HPMCOUNTER22H		0xc96
+#define CSR_HPMCOUNTER23H		0xc97
+#define CSR_HPMCOUNTER24H		0xc98
+#define CSR_HPMCOUNTER25H		0xc99
+#define CSR_HPMCOUNTER26H		0xc9a
+#define CSR_HPMCOUNTER27H		0xc9b
+#define CSR_HPMCOUNTER28H		0xc9c
+#define CSR_HPMCOUNTER29H		0xc9d
+#define CSR_HPMCOUNTER30H		0xc9e
+#define CSR_HPMCOUNTER31H		0xc9f
+#define CSR_MCYCLEH			0xb80
+#define CSR_MINSTRETH			0xb82
+#define CSR_MHPMCOUNTER3H		0xb83
+#define CSR_MHPMCOUNTER4H		0xb84
+#define CSR_MHPMCOUNTER5H		0xb85
+#define CSR_MHPMCOUNTER6H		0xb86
+#define CSR_MHPMCOUNTER7H		0xb87
+#define CSR_MHPMCOUNTER8H		0xb88
+#define CSR_MHPMCOUNTER9H		0xb89
+#define CSR_MHPMCOUNTER10H		0xb8a
+#define CSR_MHPMCOUNTER11H		0xb8b
+#define CSR_MHPMCOUNTER12H		0xb8c
+#define CSR_MHPMCOUNTER13H		0xb8d
+#define CSR_MHPMCOUNTER14H		0xb8e
+#define CSR_MHPMCOUNTER15H		0xb8f
+#define CSR_MHPMCOUNTER16H		0xb90
+#define CSR_MHPMCOUNTER17H		0xb91
+#define CSR_MHPMCOUNTER18H		0xb92
+#define CSR_MHPMCOUNTER19H		0xb93
+#define CSR_MHPMCOUNTER20H		0xb94
+#define CSR_MHPMCOUNTER21H		0xb95
+#define CSR_MHPMCOUNTER22H		0xb96
+#define CSR_MHPMCOUNTER23H		0xb97
+#define CSR_MHPMCOUNTER24H		0xb98
+#define CSR_MHPMCOUNTER25H		0xb99
+#define CSR_MHPMCOUNTER26H		0xb9a
+#define CSR_MHPMCOUNTER27H		0xb9b
+#define CSR_MHPMCOUNTER28H		0xb9c
+#define CSR_MHPMCOUNTER29H		0xb9d
+#define CSR_MHPMCOUNTER30H		0xb9e
+#define CSR_MHPMCOUNTER31H		0xb9f
+
+#define CAUSE_MISALIGNED_FETCH		0x0
+#define CAUSE_FETCH_ACCESS		0x1
+#define CAUSE_ILLEGAL_INSTRUCTION	0x2
+#define CAUSE_BREAKPOINT		0x3
+#define CAUSE_MISALIGNED_LOAD		0x4
+#define CAUSE_LOAD_ACCESS		0x5
+#define CAUSE_MISALIGNED_STORE		0x6
+#define CAUSE_STORE_ACCESS		0x7
+#define CAUSE_USER_ECALL		0x8
+#define CAUSE_HYPERVISOR_ECALL		0x9
+#define CAUSE_SUPERVISOR_ECALL		0xa
+#define CAUSE_MACHINE_ECALL		0xb
+#define CAUSE_FETCH_PAGE_FAULT		0xc
+#define CAUSE_LOAD_PAGE_FAULT		0xd
+#define CAUSE_STORE_PAGE_FAULT		0xf
+
+#define INSN_MATCH_LB			0x3
+#define INSN_MASK_LB			0x707f
+#define INSN_MATCH_LH			0x1003
+#define INSN_MASK_LH			0x707f
+#define INSN_MATCH_LW			0x2003
+#define INSN_MASK_LW			0x707f
+#define INSN_MATCH_LD			0x3003
+#define INSN_MASK_LD			0x707f
+#define INSN_MATCH_LBU			0x4003
+#define INSN_MASK_LBU			0x707f
+#define INSN_MATCH_LHU			0x5003
+#define INSN_MASK_LHU			0x707f
+#define INSN_MATCH_LWU			0x6003
+#define INSN_MASK_LWU			0x707f
+#define INSN_MATCH_SB			0x23
+#define INSN_MASK_SB			0x707f
+#define INSN_MATCH_SH			0x1023
+#define INSN_MASK_SH			0x707f
+#define INSN_MATCH_SW			0x2023
+#define INSN_MASK_SW			0x707f
+#define INSN_MATCH_SD			0x3023
+#define INSN_MASK_SD			0x707f
+
+#define INSN_MATCH_FLW			0x2007
+#define INSN_MASK_FLW			0x707f
+#define INSN_MATCH_FLD			0x3007
+#define INSN_MASK_FLD			0x707f
+#define INSN_MATCH_FLQ			0x4007
+#define INSN_MASK_FLQ			0x707f
+#define INSN_MATCH_FSW			0x2027
+#define INSN_MASK_FSW			0x707f
+#define INSN_MATCH_FSD			0x3027
+#define INSN_MASK_FSD			0x707f
+#define INSN_MATCH_FSQ			0x4027
+#define INSN_MASK_FSQ			0x707f
+
+#define INSN_MATCH_C_LD			0x6000
+#define INSN_MASK_C_LD			0xe003
+#define INSN_MATCH_C_SD			0xe000
+#define INSN_MASK_C_SD			0xe003
+#define INSN_MATCH_C_LW			0x4000
+#define INSN_MASK_C_LW			0xe003
+#define INSN_MATCH_C_SW			0xc000
+#define INSN_MASK_C_SW			0xe003
+#define INSN_MATCH_C_LDSP		0x6002
+#define INSN_MASK_C_LDSP		0xe003
+#define INSN_MATCH_C_SDSP		0xe002
+#define INSN_MASK_C_SDSP		0xe003
+#define INSN_MATCH_C_LWSP		0x4002
+#define INSN_MASK_C_LWSP		0xe003
+#define INSN_MATCH_C_SWSP		0xc002
+#define INSN_MASK_C_SWSP		0xe003
+
+#define INSN_MATCH_C_FLD		0x2000
+#define INSN_MASK_C_FLD			0xe003
+#define INSN_MATCH_C_FLW		0x6000
+#define INSN_MASK_C_FLW			0xe003
+#define INSN_MATCH_C_FSD		0xa000
+#define INSN_MASK_C_FSD			0xe003
+#define INSN_MATCH_C_FSW		0xe000
+#define INSN_MASK_C_FSW			0xe003
+#define INSN_MATCH_C_FLDSP		0x2002
+#define INSN_MASK_C_FLDSP		0xe003
+#define INSN_MATCH_C_FSDSP		0xa002
+#define INSN_MASK_C_FSDSP		0xe003
+#define INSN_MATCH_C_FLWSP		0x6002
+#define INSN_MASK_C_FLWSP		0xe003
+#define INSN_MATCH_C_FSWSP		0xe002
+#define INSN_MASK_C_FSWSP		0xe003
+
+#define INSN_MASK_WFI			0xffffff00
+#define INSN_MATCH_WFI			0x10500000
+
+#define INSN_LEN(insn)			((((insn) & 0x3) < 0x3) ? 2 : 4)
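+
+/*
+ * Example: INSN_LEN(0x2003) is 4 (low bits 0b11, an uncompressed load),
+ * while INSN_LEN(0x4502) is 2 (low bits 0b10, a 16-bit RVC encoding).
+ */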
+
+#ifdef CONFIG_64BIT
+#define LOG_REGBYTES			3
+#else
+#define LOG_REGBYTES			2
+#endif
+#define REGBYTES			(1 << LOG_REGBYTES)
+
+#define SH_RD				7
+#define SH_RS1				15
+#define SH_RS2				20
+#define SH_RS2C				2
+
+#define RV_X(x, s, n)			(((x) >> (s)) & ((1 << (n)) - 1))
+#define RVC_LW_IMM(x)			((RV_X(x, 6, 1) << 2) | \
+					 (RV_X(x, 10, 3) << 3) | \
+					 (RV_X(x, 5, 1) << 6))
+#define RVC_LD_IMM(x)			((RV_X(x, 10, 3) << 3) | \
+					 (RV_X(x, 5, 2) << 6))
+#define RVC_LWSP_IMM(x)			((RV_X(x, 4, 3) << 2) | \
+					 (RV_X(x, 12, 1) << 5) | \
+					 (RV_X(x, 2, 2) << 6))
+#define RVC_LDSP_IMM(x)			((RV_X(x, 5, 2) << 3) | \
+					 (RV_X(x, 12, 1) << 5) | \
+					 (RV_X(x, 2, 3) << 6))
+#define RVC_SWSP_IMM(x)			((RV_X(x, 9, 4) << 2) | \
+					 (RV_X(x, 7, 2) << 6))
+#define RVC_SDSP_IMM(x)			((RV_X(x, 10, 3) << 3) | \
+					 (RV_X(x, 7, 3) << 6))
+#define RVC_RS1S(insn)			(8 + RV_X(insn, SH_RD, 3))
+#define RVC_RS2S(insn)			(8 + RV_X(insn, SH_RS2C, 3))
+#define RVC_RS2(insn)			RV_X(insn, SH_RS2C, 5)
+
+#define SHIFT_RIGHT(x, y)		\
+	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
+
+#define REG_MASK			\
+	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
+
+#define REG_OFFSET(insn, pos)		\
+	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
+
+#define REG_PTR(insn, pos, regs)	\
+	(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
+
+#define GET_RM(insn)			(((insn) >> 12) & 7)
+
+#define GET_RS1(insn, regs)		(*REG_PTR(insn, SH_RS1, regs))
+#define GET_RS2(insn, regs)		(*REG_PTR(insn, SH_RS2, regs))
+#define GET_RS1S(insn, regs)		(*REG_PTR(RVC_RS1S(insn), 0, regs))
+#define GET_RS2S(insn, regs)		(*REG_PTR(RVC_RS2S(insn), 0, regs))
+#define GET_RS2C(insn, regs)		(*REG_PTR(insn, SH_RS2C, regs))
+#define GET_SP(regs)			(*REG_PTR(2, 0, regs))
+#define SET_RD(insn, regs, val)		(*REG_PTR(insn, SH_RD, regs) = (val))
+#define IMM_I(insn)			((s32)(insn) >> 20)
+#define IMM_S(insn)			(((s32)(insn) >> 25 << 5) | \
+					 (s32)(((insn) >> 7) & 0x1f))
+#define MASK_FUNCT3			0x7000
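+
+/*
+ * Usage sketch: for a trapped load, GET_RS1(insn, regs) turns the rs1
+ * field (bits 19:15) into a byte offset via REG_OFFSET() and indexes the
+ * saved frame, which assumes struct cpu_user_regs holds x0..x31
+ * contiguously in register-number order.
+ */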
+
+#endif
diff --git a/xen/include/asm-riscv/setup.h b/xen/include/asm-riscv/setup.h
new file mode 100644
index 0000000000..8c9d8066d5
--- /dev/null
+++ b/xen/include/asm-riscv/setup.h
@@ -0,0 +1,16 @@ 
+#ifndef __RISCV_SETUP_H_
+#define __RISCV_SETUP_H_
+
+#include <public/version.h>
+
+#define max_init_domid (0)
+
+#endif /* __RISCV_SETUP_H_ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/smp.h b/xen/include/asm-riscv/smp.h
new file mode 100644
index 0000000000..b2224d087f
--- /dev/null
+++ b/xen/include/asm-riscv/smp.h
@@ -0,0 +1,50 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _ASM_RISCV_SMP_H
+#define _ASM_RISCV_SMP_H
+
+#ifndef __ASSEMBLY__
+#include <xen/cpumask.h>
+#include <asm/current.h>
+#endif
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+
+#define HARTID_INVALID		(-1)
+
+/*
+ * Do we, for platform reasons, need to actually keep CPUs online when we
+ * would otherwise prefer them to be off?
+ */
+#define park_offline_cpus true
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#define raw_smp_processor_id() (get_processor_id())
+
+void stop_cpu(void);
+
+#endif /* _ASM_RISCV_SMP_H */
diff --git a/xen/include/asm-riscv/softirq.h b/xen/include/asm-riscv/softirq.h
new file mode 100644
index 0000000000..976e0ebd70
--- /dev/null
+++ b/xen/include/asm-riscv/softirq.h
@@ -0,0 +1,16 @@ 
+#ifndef __ASM_SOFTIRQ_H__
+#define __ASM_SOFTIRQ_H__
+
+#define NR_ARCH_SOFTIRQS       0
+
+#define arch_skip_send_event_check(cpu) 0
+
+#endif /* __ASM_SOFTIRQ_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/spinlock.h b/xen/include/asm-riscv/spinlock.h
new file mode 100644
index 0000000000..2f47b4d9ad
--- /dev/null
+++ b/xen/include/asm-riscv/spinlock.h
@@ -0,0 +1,13 @@ 
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#define arch_lock_acquire_barrier() smp_mb()
+#define arch_lock_release_barrier() smp_mb()
+
+#define arch_lock_relax() cpu_relax()
+#define arch_lock_signal() do { \
+} while(0)
+
+#define arch_lock_signal_wmb()  arch_lock_signal()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/xen/include/asm-riscv/string.h b/xen/include/asm-riscv/string.h
new file mode 100644
index 0000000000..733e9e00d3
--- /dev/null
+++ b/xen/include/asm-riscv/string.h
@@ -0,0 +1,28 @@ 
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/xen/include/asm-riscv/sysregs.h b/xen/include/asm-riscv/sysregs.h
new file mode 100644
index 0000000000..0f7f5c644b
--- /dev/null
+++ b/xen/include/asm-riscv/sysregs.h
@@ -0,0 +1,14 @@ 
+#ifndef __ASM_RISCV_SYSREGS_H
+#define __ASM_RISCV_SYSREGS_H
+
+#endif /* __ASM_RISCV_SYSREGS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/system.h b/xen/include/asm-riscv/system.h
new file mode 100644
index 0000000000..58c83af5b5
--- /dev/null
+++ b/xen/include/asm-riscv/system.h
@@ -0,0 +1,96 @@ 
+/*
+ * Based on arch/arm/include/asm/system.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_SYSTEM_H
+#define _ASM_RISCV_SYSTEM_H
+
+#include <asm/csr.h>
+#include <xen/lib.h>
+
+#ifndef __ASSEMBLY__
+
+#define nop()		__asm__ __volatile__ ("nop")
+
+#define RISCV_FENCE(p, s) \
+	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices or memory. */
+#define mb()		RISCV_FENCE(iorw,iorw)
+#define rmb()		RISCV_FENCE(ir,ir)
+#define wmb()		RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define smp_mb()	RISCV_FENCE(rw,rw)
+#define smp_rmb()	RISCV_FENCE(r,r)
+#define smp_wmb()	RISCV_FENCE(w,w)
+
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(rw,w);						\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(r,rw);						\
+	___p1;								\
+})
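+
+/*
+ * Usage sketch: pair the two across CPUs, e.g. a producer writes a buffer
+ * and then __smp_store_release(&ready, 1); a consumer seeing
+ * __smp_load_acquire(&ready) == 1 is then guaranteed to observe the
+ * buffer contents written before the release.
+ */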
+
+static inline unsigned long local_save_flags(void)
+{
+	return csr_read(sstatus);
+}
+
+static inline void local_irq_enable(void)
+{
+	csr_set(sstatus, SR_SIE);
+}
+
+static inline void local_irq_disable(void)
+{
+	csr_clear(sstatus, SR_SIE);
+}
+
+#define local_irq_save(x)                     \
+({                                            \
+    x = csr_read_clear(sstatus, SR_SIE);      \
+    local_irq_disable();                      \
+})
+
+static inline void local_irq_restore(unsigned long flags)
+{
+	csr_set(sstatus, flags & SR_SIE);
+}
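+
+/*
+ * Usage sketch:
+ *
+ *     unsigned long flags;
+ *
+ *     local_irq_save(flags);
+ *     ... critical section ...
+ *     local_irq_restore(flags);
+ *
+ * restore re-enables SR_SIE only if it was set when the flags were saved.
+ */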
+
+static inline int local_irq_is_enabled(void)
+{
+    unsigned long flags = local_save_flags();
+
+    return flags & SR_SIE;
+}
+
+#define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_SYSTEM_H */
diff --git a/xen/include/asm-riscv/time.h b/xen/include/asm-riscv/time.h
new file mode 100644
index 0000000000..09ec16ec8b
--- /dev/null
+++ b/xen/include/asm-riscv/time.h
@@ -0,0 +1,60 @@ 
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_TIME_H
+#define _ASM_RISCV_TIME_H
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles_inline(void)
+{
+	cycles_t n;
+
+	__asm__ __volatile__ (
+		"rdtime %0"
+		: "=r" (n));
+	return n;
+}
+#define get_cycles get_cycles_inline
+
+#ifdef CONFIG_RISCV_64
+static inline uint64_t get_cycles64(void)
+{
+	return get_cycles();
+}
+#else
+static inline uint64_t get_cycles64(void)
+{
+	u32 lo, hi, tmp;
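+
+	/*
+	 * On RV32 the 64-bit time CSR is read as two halves: read the
+	 * high half, then the low half, then the high half again, and
+	 * retry if the high half changed (i.e. the low half wrapped
+	 * between the reads).
+	 */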
+	__asm__ __volatile__ (
+		"1:\n"
+		"rdtimeh %0\n"
+		"rdtime %1\n"
+		"rdtimeh %2\n"
+		"bne %0, %2, 1b"
+		: "=&r" (hi), "=&r" (lo), "=&r" (tmp));
+	return ((u64)hi << 32) | lo;
+}
+#endif
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+static inline int read_current_timer(unsigned long *timer_val)
+{
+	*timer_val = get_cycles();
+	return 0;
+}
+
+void preinit_xen_time(void);
+
+#endif /* _ASM_RISCV_TIME_H */
diff --git a/xen/include/asm-riscv/trace.h b/xen/include/asm-riscv/trace.h
new file mode 100644
index 0000000000..e06def61f6
--- /dev/null
+++ b/xen/include/asm-riscv/trace.h
@@ -0,0 +1,12 @@ 
+#ifndef __ASM_TRACE_H__
+#define __ASM_TRACE_H__
+
+#endif /* __ASM_TRACE_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/types.h b/xen/include/asm-riscv/types.h
new file mode 100644
index 0000000000..48f27f97ba
--- /dev/null
+++ b/xen/include/asm-riscv/types.h
@@ -0,0 +1,73 @@ 
+#ifndef __RISCV_TYPES_H__
+#define __RISCV_TYPES_H__
+
+#ifndef __ASSEMBLY__
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(CONFIG_RISCV_32)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#elif defined (CONFIG_RISCV_64)
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
+#endif
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+#if defined(CONFIG_RISCV_32)
+typedef signed long long s64;
+typedef unsigned long long u64;
+typedef u32 vaddr_t;
+#define PRIvaddr PRIx32
+typedef u64 paddr_t;
+#define INVALID_PADDR (~0ULL)
+#define PRIpaddr "016llx"
+typedef u32 register_t;
+#define PRIregister "x"
+#elif defined (CONFIG_RISCV_64)
+typedef signed long s64;
+typedef unsigned long u64;
+typedef u64 vaddr_t;
+#define PRIvaddr PRIx64
+typedef u64 paddr_t;
+#define INVALID_PADDR (~0UL)
+#define PRIpaddr "016lx"
+typedef u64 register_t;
+#define PRIregister "lx"
+#endif
+
+#if defined(__SIZE_TYPE__)
+typedef __SIZE_TYPE__ size_t;
+#else
+typedef unsigned long size_t;
+#endif
+typedef signed long ssize_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __RISCV_TYPES_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-riscv/vm_event.h b/xen/include/asm-riscv/vm_event.h
new file mode 100644
index 0000000000..32d271fcf4
--- /dev/null
+++ b/xen/include/asm-riscv/vm_event.h
@@ -0,0 +1,61 @@ 
+/*
+ * vm_event.h: architecture specific vm_event handling routines
+ *
+ * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_RISCV_VM_EVENT_H__
+#define __ASM_RISCV_VM_EVENT_H__
+
+#include <xen/sched.h>
+#include <xen/vm_event.h>
+#include <public/domctl.h>
+
+static inline int vm_event_init_domain(struct domain *d)
+{
+    /* Nothing to do. */
+    return 0;
+}
+
+static inline void vm_event_cleanup_domain(struct domain *d)
+{
+    memset(&d->monitor, 0, sizeof(d->monitor));
+}
+
+static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                              vm_event_response_t *rsp)
+{
+    /* Not supported on RISCV. */
+}
+
+static inline
+void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on RISCV. */
+}
+
+static inline
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on RISCV. */
+}
+
+static inline
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    /* Not supported on RISCV. */
+}
+
+#endif /* __ASM_RISCV_VM_EVENT_H__ */
diff --git a/xen/include/asm-riscv/xenoprof.h b/xen/include/asm-riscv/xenoprof.h
new file mode 100644
index 0000000000..3db6ce3ab2
--- /dev/null
+++ b/xen/include/asm-riscv/xenoprof.h
@@ -0,0 +1,12 @@ 
+#ifndef __ASM_XENOPROF_H__
+#define __ASM_XENOPROF_H__
+
+#endif /* __ASM_XENOPROF_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/arch-riscv.h b/xen/include/public/arch-riscv.h
new file mode 100644
index 0000000000..f4b4875cc7
--- /dev/null
+++ b/xen/include/public/arch-riscv.h
@@ -0,0 +1,181 @@ 
+/******************************************************************************
+ * arch-riscv.h
+ *
+ * Guest OS interface to RISC-V Xen.
+ * Initially based on the ARM implementation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_RISCV_H__
+#define __XEN_PUBLIC_ARCH_RISCV_H__
+
+#define  int64_aligned_t  int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#ifndef __ASSEMBLY__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type)                  \
+    typedef union { type *p; unsigned long q; }                 \
+        __guest_handle_ ## name;                                \
+    typedef union { type *p; uint64_aligned_t q; }              \
+        __guest_handle_64_ ## name
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.  On RISC-V it is always 8 bytes in size and
+ * 8-byte aligned.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.  It is 4 bytes on RV32 and 8 bytes on RV64.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name)        __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name)    __guest_handle_ ## name
+#define set_xen_guest_handle_raw(hnd, val)                  \
+    do {                                                    \
+        typeof(&(hnd)) _sxghr_tmp = &(hnd);                 \
+        _sxghr_tmp->q = 0;                                  \
+        _sxghr_tmp->p = val;                                \
+    } while ( 0 )
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
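+/*
+ * Illustrative use of the handle machinery above (struct example and
+ * ptr are placeholders; the uint64 handle type is assumed to have
+ * been set up elsewhere with DEFINE_XEN_GUEST_HANDLE()):
+ *
+ *   struct example { XEN_GUEST_HANDLE(uint64) buf; } e;
+ *   uint64_t *ptr;
+ *   set_xen_guest_handle(e.buf, ptr);
+ */
+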
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit register names. */
+# define __DECL_REG(n64, n32) union {          \
+        uint64_t n64;                          \
+        uint32_t n32;                          \
+    }
+#else
+/* Non-gcc sources must always use the proper 64-bit name. */
+#define __DECL_REG(n64, n32) uint64_t n64
+#endif
+
+struct vcpu_guest_core_regs
+{
+    unsigned long zero;
+    unsigned long ra;
+    unsigned long sp;
+    unsigned long gp;
+    unsigned long tp;
+    unsigned long t0;
+    unsigned long t1;
+    unsigned long t2;
+    unsigned long s0;
+    unsigned long s1;
+    unsigned long a0;
+    unsigned long a1;
+    unsigned long a2;
+    unsigned long a3;
+    unsigned long a4;
+    unsigned long a5;
+    unsigned long a6;
+    unsigned long a7;
+    unsigned long s2;
+    unsigned long s3;
+    unsigned long s4;
+    unsigned long s5;
+    unsigned long s6;
+    unsigned long s7;
+    unsigned long s8;
+    unsigned long s9;
+    unsigned long s10;
+    unsigned long s11;
+    unsigned long t3;
+    unsigned long t4;
+    unsigned long t5;
+    unsigned long t6;
+    unsigned long sepc;
+    unsigned long sstatus;
+    unsigned long hstatus;
+    unsigned long sp_exec;
+
+    unsigned long hedeleg;
+    unsigned long hideleg;
+    unsigned long bsstatus;
+    unsigned long bsie;
+    unsigned long bstvec;
+    unsigned long bsscratch;
+    unsigned long bsepc;
+    unsigned long bscause;
+    unsigned long bstval;
+    unsigned long bsip;
+    unsigned long bsatp;
+};
+typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
+
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn PRIx64
+#define PRIu_xen_pfn PRIu64
+
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong PRIx64
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+struct vcpu_guest_context {
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct xen_arch_domainconfig {
+};
+
+struct arch_vcpu_info {
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct arch_shared_info {
+};
+typedef struct arch_shared_info arch_shared_info_t;
+
+typedef uint64_t xen_callback_t;
+
+#endif
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+/* Only one. All other VCPUs must use VCPUOP_register_vcpu_info. */
+#define XEN_LEGACY_MAX_VCPUS 1
+
+/* Maximum number of guest vCPUs currently supported. */
+#define GUEST_MAX_VCPUS 128
+
+#endif /* __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
+#endif /*  __XEN_PUBLIC_ARCH_RISCV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/arch-riscv/hvm/save.h b/xen/include/public/arch-riscv/hvm/save.h
new file mode 100644
index 0000000000..fa010f0315
--- /dev/null
+++ b/xen/include/public/arch-riscv/hvm/save.h
@@ -0,0 +1,39 @@ 
+/*
+ * Structure definitions for HVM state that is held by Xen and must
+ * be saved along with the domain's memory and device-model state.
+ *
+ * Copyright (c) 2012 Citrix Systems Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_SAVE_RISCV_H__
+#define __XEN_PUBLIC_HVM_SAVE_RISCV_H__
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index f72e3a9bc4..d7505f279c 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -106,6 +106,8 @@  DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 #include "../arch-x86/hvm/save.h"
 #elif defined(__arm__) || defined(__aarch64__)
 #include "../arch-arm/hvm/save.h"
+#elif defined(__riscv)
+#include "../arch-riscv/hvm/save.h"
 #else
 #error "unsupported architecture"
 #endif
diff --git a/xen/include/public/pmu.h b/xen/include/public/pmu.h
index 0e1312cfe5..ac73586a3b 100644
--- a/xen/include/public/pmu.h
+++ b/xen/include/public/pmu.h
@@ -28,6 +28,8 @@ 
 #include "arch-x86/pmu.h"
 #elif defined (__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
+#elif defined (__riscv)
+#include "arch-riscv.h"
 #else
 #error "Unsupported architecture"
 #endif
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index d2198dffad..6e439991dc 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -33,6 +33,8 @@ 
 #include "arch-x86/xen.h"
 #elif defined(__arm__) || defined (__aarch64__)
 #include "arch-arm.h"
+#elif defined(__riscv)
+#include "arch-riscv.h"
 #else
 #error "Unsupported architecture"
 #endif