
[v3,09/34] xen/riscv: introduce system.h

Message ID e577e055932d05ec34f01db57d1b2d2b1be5418d.1703255175.git.oleksii.kurochko@gmail.com (mailing list archive)
State Superseded
Series Enable build of full Xen for RISC-V

Commit Message

Oleksii Kurochko Dec. 22, 2023, 3:12 p.m. UTC
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V3:
 - Add SPDX
 - Fix code style issues
 - Change the prototype of local_irq_is_enabled() to return bool and
   update the return code accordingly.
 - Update the code style
---
Changes in V2:
 - Nothing changed. Only rebase.
---
 xen/arch/riscv/include/asm/system.h | 90 +++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 xen/arch/riscv/include/asm/system.h

Comments

Jan Beulich Jan. 11, 2024, 4 p.m. UTC | #1
On 22.12.2023 16:12, Oleksii Kurochko wrote:
> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/system.h
> @@ -0,0 +1,90 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#ifndef _ASM_RISCV_BARRIER_H
> +#define _ASM_RISCV_BARRIER_H

s/BARRIER/SYSTEM/ ?

With that taken care of (which I'd be happy to do while committing)
Acked-by: Jan Beulich <jbeulich@suse.com>
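
For reference, a minimal sketch of the include guard after the rename suggested above; the exact spelling in the committed header may differ:

    /* SPDX-License-Identifier: GPL-2.0-only */

    #ifndef _ASM_RISCV_SYSTEM_H
    #define _ASM_RISCV_SYSTEM_H

    /* ... header contents as in the patch below ... */

    #endif /* _ASM_RISCV_SYSTEM_H */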

> +#include <xen/stdbool.h>
> +
> +#include <asm/csr.h>
> +
> +#ifndef __ASSEMBLY__
> +
> +#define RISCV_FENCE(p, s) \
> +    __asm__ __volatile__ ( "fence " #p "," #s : : : "memory" )
> +
> +/* These barriers need to enforce ordering on both devices or memory. */
> +#define mb()                    RISCV_FENCE(iorw, iorw)
> +#define rmb()                   RISCV_FENCE(ir, ir)
> +#define wmb()                   RISCV_FENCE(ow, ow)
> +
> +/* These barriers do not need to enforce ordering on devices, just memory. */
> +#define smp_mb()                RISCV_FENCE(rw, rw)
> +#define smp_rmb()               RISCV_FENCE(r, r)
> +#define smp_wmb()               RISCV_FENCE(w, w)
> +#define smp_mb__before_atomic() smp_mb()
> +#define smp_mb__after_atomic()  smp_mb()
> +
> +/*
> +#define smp_store_release(p, v)         \
> +do {                                    \
> +    compiletime_assert_atomic_type(*p); \
> +    RISCV_FENCE(rw, w);                 \
> +    WRITE_ONCE(*p, v);                  \
> +} while (0)
> +
> +#define smp_load_acquire(p)             \
> +({                                      \
> +    typeof(*p) p1 = READ_ONCE(*p);      \
> +    compiletime_assert_atomic_type(*p); \
> +    RISCV_FENCE(r,rw);                  \
> +    p1;                                 \
> +})
> +*/
> +
> +static inline unsigned long local_save_flags(void)
> +{
> +    return csr_read(sstatus);
> +}
> +
> +static inline void local_irq_enable(void)
> +{
> +    csr_set(sstatus, SSTATUS_SIE);
> +}
> +
> +static inline void local_irq_disable(void)
> +{
> +    csr_clear(sstatus, SSTATUS_SIE);
> +}
> +
> +#define local_irq_save(x)                           \
> +({                                                  \
> +    x = csr_read_clear(CSR_SSTATUS, SSTATUS_SIE);   \
> +    local_irq_disable();                            \
> +})
> +
> +static inline void local_irq_restore(unsigned long flags)
> +{
> +	csr_set(CSR_SSTATUS, flags & SSTATUS_SIE);
> +}
> +
> +static inline bool local_irq_is_enabled(void)
> +{
> +    unsigned long flags = local_save_flags();
> +
> +    return (flags & SSTATUS_SIE) != 0;

Just as a remark - when the resulting type is bool, we generally
prefer to omit the "!= 0".

Jan
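
A minimal sketch of the form the remark above suggests, assuming the same SSTATUS_SIE flag handling as in the patch:

    static inline bool local_irq_is_enabled(void)
    {
        unsigned long flags = local_save_flags();

        /* The implicit conversion to bool makes an explicit "!= 0" redundant. */
        return flags & SSTATUS_SIE;
    }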
Oleksii Kurochko Jan. 15, 2024, 9:28 a.m. UTC | #2
On Thu, 2024-01-11 at 17:00 +0100, Jan Beulich wrote:
> On 22.12.2023 16:12, Oleksii Kurochko wrote:
> > --- /dev/null
> > +++ b/xen/arch/riscv/include/asm/system.h
> > @@ -0,0 +1,90 @@
> > +/* SPDX-License-Identifier: GPL-2.0-only */
> > +
> > +#ifndef _ASM_RISCV_BARRIER_H
> > +#define _ASM_RISCV_BARRIER_H
> 
> s/BARRIER/SYSTEM/ ?
Yes, it should be SYSTEM. Thanks for noticing that.

> 
> With that taken care of (which I'd be happy to do while committing)
> Acked-by: Jan Beulich <jbeulich@suse.com>
Thanks a lot. I'd be happy with that.

> 
> > +#include <xen/stdbool.h>
> > +
> > +#include <asm/csr.h>
> > +
> > +#ifndef __ASSEMBLY__
> > +
> > +#define RISCV_FENCE(p, s) \
> > +    __asm__ __volatile__ ( "fence " #p "," #s : : : "memory" )
> > +
> > +/* These barriers need to enforce ordering on both devices or memory. */
> > +#define mb()                    RISCV_FENCE(iorw, iorw)
> > +#define rmb()                   RISCV_FENCE(ir, ir)
> > +#define wmb()                   RISCV_FENCE(ow, ow)
> > +
> > +/* These barriers do not need to enforce ordering on devices, just memory. */
> > +#define smp_mb()                RISCV_FENCE(rw, rw)
> > +#define smp_rmb()               RISCV_FENCE(r, r)
> > +#define smp_wmb()               RISCV_FENCE(w, w)
> > +#define smp_mb__before_atomic() smp_mb()
> > +#define smp_mb__after_atomic()  smp_mb()
> > +
> > +/*
> > +#define smp_store_release(p, v)         \
> > +do {                                    \
> > +    compiletime_assert_atomic_type(*p); \
> > +    RISCV_FENCE(rw, w);                 \
> > +    WRITE_ONCE(*p, v);                  \
> > +} while (0)
> > +
> > +#define smp_load_acquire(p)             \
> > +({                                      \
> > +    typeof(*p) p1 = READ_ONCE(*p);      \
> > +    compiletime_assert_atomic_type(*p); \
> > +    RISCV_FENCE(r,rw);                  \
> > +    p1;                                 \
> > +})
> > +*/
> > +
> > +static inline unsigned long local_save_flags(void)
> > +{
> > +    return csr_read(sstatus);
> > +}
> > +
> > +static inline void local_irq_enable(void)
> > +{
> > +    csr_set(sstatus, SSTATUS_SIE);
> > +}
> > +
> > +static inline void local_irq_disable(void)
> > +{
> > +    csr_clear(sstatus, SSTATUS_SIE);
> > +}
> > +
> > +#define local_irq_save(x)                           \
> > +({                                                  \
> > +    x = csr_read_clear(CSR_SSTATUS, SSTATUS_SIE);   \
> > +    local_irq_disable();                            \
> > +})
> > +
> > +static inline void local_irq_restore(unsigned long flags)
> > +{
> > +	csr_set(CSR_SSTATUS, flags & SSTATUS_SIE);
> > +}
> > +
> > +static inline bool local_irq_is_enabled(void)
> > +{
> > +    unsigned long flags = local_save_flags();
> > +
> > +    return (flags & SSTATUS_SIE) != 0;
> 
> Just as a remark - when the resulting type is bool, we generally
> prefer to omit the "!= 0".
Thanks. I'll take that into account.

~ Oleksii

Patch

diff --git a/xen/arch/riscv/include/asm/system.h b/xen/arch/riscv/include/asm/system.h
new file mode 100644
index 0000000000..08c12158fc
--- /dev/null
+++ b/xen/arch/riscv/include/asm/system.h
@@ -0,0 +1,90 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#include <xen/stdbool.h>
+
+#include <asm/csr.h>
+
+#ifndef __ASSEMBLY__
+
+#define RISCV_FENCE(p, s) \
+    __asm__ __volatile__ ( "fence " #p "," #s : : : "memory" )
+
+/* These barriers need to enforce ordering on both devices or memory. */
+#define mb()                    RISCV_FENCE(iorw, iorw)
+#define rmb()                   RISCV_FENCE(ir, ir)
+#define wmb()                   RISCV_FENCE(ow, ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define smp_mb()                RISCV_FENCE(rw, rw)
+#define smp_rmb()               RISCV_FENCE(r, r)
+#define smp_wmb()               RISCV_FENCE(w, w)
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic()  smp_mb()
+
+/*
+#define smp_store_release(p, v)         \
+do {                                    \
+    compiletime_assert_atomic_type(*p); \
+    RISCV_FENCE(rw, w);                 \
+    WRITE_ONCE(*p, v);                  \
+} while (0)
+
+#define smp_load_acquire(p)             \
+({                                      \
+    typeof(*p) p1 = READ_ONCE(*p);      \
+    compiletime_assert_atomic_type(*p); \
+    RISCV_FENCE(r,rw);                  \
+    p1;                                 \
+})
+*/
+
+static inline unsigned long local_save_flags(void)
+{
+    return csr_read(sstatus);
+}
+
+static inline void local_irq_enable(void)
+{
+    csr_set(sstatus, SSTATUS_SIE);
+}
+
+static inline void local_irq_disable(void)
+{
+    csr_clear(sstatus, SSTATUS_SIE);
+}
+
+#define local_irq_save(x)                           \
+({                                                  \
+    x = csr_read_clear(CSR_SSTATUS, SSTATUS_SIE);   \
+    local_irq_disable();                            \
+})
+
+static inline void local_irq_restore(unsigned long flags)
+{
+	csr_set(CSR_SSTATUS, flags & SSTATUS_SIE);
+}
+
+static inline bool local_irq_is_enabled(void)
+{
+    unsigned long flags = local_save_flags();
+
+    return (flags & SSTATUS_SIE) != 0;
+}
+
+#define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
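
For context, a hypothetical caller of the IRQ helpers introduced by this header might look as follows; this is only an illustration of the intended save/disable/restore pairing and is not part of the patch:

    static void example_critical_section(void)
    {
        unsigned long flags;

        /* Save the current SSTATUS value and disable interrupts. */
        local_irq_save(flags);

        /* ... code that must run with interrupts off ... */

        /* Re-enable interrupts only if SIE was set beforehand. */
        local_irq_restore(flags);
    }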