| Message ID | 4411f6af38586074b347cd6005f19f9c670faa74.1703255175.git.oleksii.kurochko@gmail.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | Enable build of full Xen for RISC-V |
On Fri, 2023-12-22 at 17:13 +0200, Oleksii Kurochko wrote: > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com> > --- > Changes in V3: > - update the commit message > - introduce DIRECTMAP_VIRT_START. > - drop changes related pfn_to_paddr() and paddr_to_pfn as they were > remvoe in > [PATCH v2 32/39] xen/riscv: add minimal stuff to asm/page.h to > build full Xen > - code style fixes. > - drop get_page_nr and put_page_nr as they don't need for time > being > - drop CONFIG_STATIC_MEMORY related things > - code style fixes > --- > Changes in V2: > - define stub for arch_get_dma_bitsize(void) > --- > xen/arch/riscv/include/asm/config.h | 2 + > xen/arch/riscv/include/asm/mm.h | 248 > ++++++++++++++++++++++++++++ > 2 files changed, 250 insertions(+) > > diff --git a/xen/arch/riscv/include/asm/config.h > b/xen/arch/riscv/include/asm/config.h > index fb9fc9daaa..400309f4ef 100644 > --- a/xen/arch/riscv/include/asm/config.h > +++ b/xen/arch/riscv/include/asm/config.h > @@ -67,6 +67,8 @@ > > #define XEN_VIRT_START 0xFFFFFFFFC0000000 /* (_AC(-1, UL) + 1 - > GB(1)) */ > > +#define DIRECTMAP_VIRT_START SLOTN(200) > + > #define FRAMETABLE_VIRT_START SLOTN(196) > #define FRAMETABLE_SIZE GB(3) > #define FRAMETABLE_NR (FRAMETABLE_SIZE / > sizeof(*frame_table)) > diff --git a/xen/arch/riscv/include/asm/mm.h > b/xen/arch/riscv/include/asm/mm.h > index 57026e134d..14fce72fde 100644 > --- a/xen/arch/riscv/include/asm/mm.h > +++ b/xen/arch/riscv/include/asm/mm.h > @@ -3,8 +3,251 @@ > #ifndef _ASM_RISCV_MM_H > #define _ASM_RISCV_MM_H > > +#include <public/xen.h> > +#include <xen/pdx.h> > +#include <xen/types.h> > + > +#include <asm/page.h> > #include <asm/page-bits.h> > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > + > +#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK)) > +#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | > DIRECTMAP_VIRT_START)) > + > +/* Convert between Xen-heap virtual addresses and machine frame > numbers. */ > +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) > +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << > PAGE_SHIFT) > + > +/* Convert between Xen-heap virtual addresses and page-info > structures. */ > +static inline struct page_info *virt_to_page(const void *v) > +{ > + BUG(); > + return NULL; > +} > + > +/* > + * We define non-underscored wrappers for above conversion > functions. > + * These are overriden in various source files while underscored > version > + * remain intact. > + */ > +#define virt_to_mfn(va) __virt_to_mfn(va) > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) > + > +struct page_info > +{ > + /* Each frame can be threaded onto a doubly-linked list. */ > + struct page_list_entry list; > + > + /* Reference count and various PGC_xxx flags and fields. 
*/ > + unsigned long count_info; > + > + /* Context-dependent fields follow... */ > + union { > + /* Page is in use: ((count_info & PGC_count_mask) != 0). */ > + struct { > + /* Type reference count and various PGT_xxx flags and > fields. */ > + unsigned long type_info; > + } inuse; > + /* Page is on a free list: ((count_info & PGC_count_mask) == > 0). */ > + union { > + struct { > + /* > + * Index of the first *possibly* unscrubbed page in > the buddy. > + * One more bit than maximum possible order to > accommodate > + * INVALID_DIRTY_IDX. > + */ > +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) > + unsigned long first_dirty:MAX_ORDER + 1; > + > + /* Do TLBs need flushing for safety before next page > use? */ > + bool need_tlbflush:1; > + > +#define BUDDY_NOT_SCRUBBING 0 > +#define BUDDY_SCRUBBING 1 > +#define BUDDY_SCRUB_ABORT 2 > + unsigned long scrub_state:2; > + }; > + > + unsigned long val; > + } free; > + > + } u; > + > + union { > + /* Page is in use, but not as a shadow. */ > + struct { > + /* Owner of this page (zero if page is anonymous). */ > + struct domain *domain; > + } inuse; > + > + /* Page is on a free list. */ > + struct { > + /* Order-size of the free chunk this page is the head > of. */ > + unsigned int order; > + } free; > + > + } v; > + > + union { > + /* > + * Timestamp from 'TLB clock', used to avoid extra safety > flushes. > + * Only valid for: a) free pages, and b) pages with zero > type count > + */ > + uint32_t tlbflush_timestamp; > + }; > + uint64_t pad; > +}; > + > +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) > + > +/* PDX of the first page in the frame table. */ > +extern unsigned long frametable_base_pdx; > + > +/* Convert between machine frame numbers and page-info structures. > */ > +#define mfn_to_page(mfn) > \ > + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) > +#define page_to_mfn(pg) > \ > + pdx_to_mfn((unsigned long)((pg) - frame_table) + > frametable_base_pdx) > + > +static inline void *page_to_virt(const struct page_info *pg) > +{ > + return mfn_to_virt(mfn_x(page_to_mfn(pg))); > +} > + > +/* > + * Common code requires get_page_type and put_page_type. > + * We don't care about typecounts so we just do the minimum to make > it > + * happy. > + */ > +static inline int get_page_type(struct page_info *page, unsigned > long type) > +{ > + return 1; > +} > + > +static inline void put_page_type(struct page_info *page) > +{ > +} > + > +static inline void put_page_and_type(struct page_info *page) > +{ > + put_page_type(page); > + put_page(page); > +} > + > +/* > + * RISC-V does not have an M2P, but common code expects a handful of > + * M2P-related defines and functions. Provide dummy versions of > these. > + */ > +#define INVALID_M2P_ENTRY (~0UL) > +#define SHARED_M2P_ENTRY (~0UL - 1UL) > +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) > + > +/* Xen always owns P2M on RISC-V */ > +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); > } while (0) > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > + > +#define PDX_GROUP_SHIFT (16 + 5) > + > +static inline unsigned long domain_get_maximum_gpfn(struct domain > *d) > +{ > + BUG(); > + return 0; > +} > + > +static inline long arch_memory_op(int op, > XEN_GUEST_HANDLE_PARAM(void) arg) > +{ > + BUG(); > + return 0; > +} > + > +/* > + * On RISCV, all the RAM is currently direct mapped in Xen. > + * Hence return always true. 
> + */ > +static inline bool arch_mfns_in_directmap(unsigned long mfn, > unsigned long nr) > +{ > + return true; > +} > + > +#define PG_shift(idx) (BITS_PER_LONG - (idx)) > +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) > + > +#define PGT_none PG_mask(0, 1) /* no special uses of this > page */ > +#define PGT_writable_page PG_mask(1, 1) /* has writable > mappings? */ > +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or > 63. */ > + > + /* Count of uses of this frame as its current type. */ > +#define PGT_count_width PG_shift(2) > +#define PGT_count_mask ((1UL<<PGT_count_width)-1) > + > +/* > + * Page needs to be scrubbed. Since this bit can only be set on a > page that is > + * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit. > + */ > +#define _PGC_need_scrub _PGC_allocated > +#define PGC_need_scrub PGC_allocated > + > +// /* Cleared when the owning guest 'frees' this page. */ > +#define _PGC_allocated PG_shift(1) > +#define PGC_allocated PG_mask(1, 1) > + /* Page is Xen heap? */ > +#define _PGC_xen_heap PG_shift(2) > +#define PGC_xen_heap PG_mask(1, 2) > +/* Page is broken? */ > +#define _PGC_broken PG_shift(7) > +#define PGC_broken PG_mask(1, 7) > + /* Mutually-exclusive page states: { inuse, offlining, offlined, > free }. */ > +#define PGC_state PG_mask(3, 9) > +#define PGC_state_inuse PG_mask(0, 9) > +#define PGC_state_offlining PG_mask(1, 9) > +#define PGC_state_offlined PG_mask(2, 9) > +#define PGC_state_free PG_mask(3, 9) > +// #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > PGC_state_##st) > + > +/* Count of references to this frame. */ > +#define PGC_count_width PG_shift(9) > +#define PGC_count_mask ((1UL<<PGC_count_width)-1) > + > +#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > PGC_state_##st) > + > +#define _PGC_extra PG_shift(10) > +#define PGC_extra PG_mask(1, 10) > + > +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) > +#define is_xen_heap_mfn(mfn) \ > + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) > + > +#define is_xen_fixed_mfn(mfn) \ > + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ > + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) > + > +#define page_get_owner(_p) (_p)->v.inuse.domain > +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) > + > +/* TODO: implement */ > +#define mfn_valid(mfn) ({ (void) (mfn); 0; }) > + > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > + > +#define domain_set_alloc_bitsize(d) ((void)0) > +#define domain_clamp_alloc_bitsize(d, b) (b) > + > +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) I missed saving these changes. It should be _pfn -> pfn_. (Just a reminder for me). Sorry for the inconvenience. > + > extern unsigned char cpu0_boot_stack[]; > > void setup_initial_pagetables(void); > @@ -17,4 +260,9 @@ unsigned long calc_phys_offset(void); > > void turn_on_mmu(unsigned long ra); > > +static inline unsigned int arch_get_dma_bitsize(void) > +{ > + return 32; /* TODO */ > +} > + > #endif /* _ASM_RISCV_MM_H */
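As an aside for readers skimming the frame-table part of the patch: a minimal sketch (not part of the patch) of how the mfn_to_page()/page_to_mfn() pair above is intended to round-trip for MFNs covered by the frame table; mfn_eq() and ASSERT() are the usual helpers from common Xen headers, and the function name here is purely illustrative.

    /*
     * Illustration only: mfn_to_page() and page_to_mfn() are inverses of one
     * another, with frametable_base_pdx compressing away the PDX range below
     * the first RAM page.
     */
    static inline void frametable_roundtrip_check(mfn_t mfn)
    {
        struct page_info *pg = mfn_to_page(mfn);  /* frame_table + (pdx - base) */

        ASSERT(mfn_eq(page_to_mfn(pg), mfn));
    }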
On Fri, 2023-12-22 at 18:32 +0200, Oleksii wrote: > > + > > +struct page_info > > +{ > > + /* Each frame can be threaded onto a doubly-linked list. */ > > + struct page_list_entry list; > > + > > + /* Reference count and various PGC_xxx flags and fields. */ > > + unsigned long count_info; > > + > > + /* Context-dependent fields follow... */ > > + union { > > + /* Page is in use: ((count_info & PGC_count_mask) != 0). > > */ > > + struct { > > + /* Type reference count and various PGT_xxx flags and > > fields. */ > > + unsigned long type_info; > > + } inuse; > > + /* Page is on a free list: ((count_info & PGC_count_mask) > > == > > 0). */ > > + union { > > + struct { > > + /* > > + * Index of the first *possibly* unscrubbed page > > in > > the buddy. > > + * One more bit than maximum possible order to > > accommodate > > + * INVALID_DIRTY_IDX. > > + */ > > +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) > > + unsigned long first_dirty:MAX_ORDER + 1; > > + > > + /* Do TLBs need flushing for safety before next > > page > > use? */ > > + bool need_tlbflush:1; > > + > > +#define BUDDY_NOT_SCRUBBING 0 > > +#define BUDDY_SCRUBBING 1 > > +#define BUDDY_SCRUB_ABORT 2 > > + unsigned long scrub_state:2; > > + }; > > + > > + unsigned long val; > > + } free; > > + > > + } u; > > + > > + union { > > + /* Page is in use, but not as a shadow. */ > > + struct { > > + /* Owner of this page (zero if page is anonymous). */ > > + struct domain *domain; > > + } inuse; > > + > > + /* Page is on a free list. */ > > + struct { > > + /* Order-size of the free chunk this page is the head > > of. */ > > + unsigned int order; > > + } free; > > + > > + } v; > > + > > + union { > > + /* > > + * Timestamp from 'TLB clock', used to avoid extra safety > > flushes. > > + * Only valid for: a) free pages, and b) pages with zero > > type count > > + */ > > + uint32_t tlbflush_timestamp; > > + }; > > + uint64_t pad; I think it can be removed too. The changes weren't saved. ( Just another one reminder for me ). Sorry for the convenience. ~ Oleksii >
On 22.12.2023 17:32, Oleksii wrote:
>> +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
> I missed saving these changes. It should be _pfn -> pfn_. (Just a
> reminder for me).

And what purpose would the trailing underscore serve here?

Jan
On Thu, 2024-01-11 at 17:43 +0100, Jan Beulich wrote:
> On 22.12.2023 17:32, Oleksii wrote:
> > > +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
> > I missed saving these changes. It should be _pfn -> pfn_. (Just a
> > reminder for me).
>
> And what purpose would the trailing underscore serve here?
There is none; I'll just use pfn. Thanks for noticing that.

~ Oleksii
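The agreed rename would then leave the macro looking roughly like this (sketch only; the underlying field is unchanged):

    #define PFN_ORDER(pfn) ((pfn)->v.free.order)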
On 22.12.2023 16:13, Oleksii Kurochko wrote: > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com> > --- > Changes in V3: > - update the commit message ??? (yet again) > --- a/xen/arch/riscv/include/asm/mm.h > +++ b/xen/arch/riscv/include/asm/mm.h > @@ -3,8 +3,251 @@ > #ifndef _ASM_RISCV_MM_H > #define _ASM_RISCV_MM_H > > +#include <public/xen.h> > +#include <xen/pdx.h> > +#include <xen/types.h> > + > +#include <asm/page.h> > #include <asm/page-bits.h> > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) Everything you have above ... > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) ... appears a 2nd time right afterwards. > +#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK)) > +#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | DIRECTMAP_VIRT_START)) > + > +/* Convert between Xen-heap virtual addresses and machine frame numbers. */ > +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) > +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT) These would imo better use maddr_to_mfn() and mfn_to_maddr(), rather than kind of open-coding them. The former could also use PFN_DOWN() as an alternative. > +/* Convert between Xen-heap virtual addresses and page-info structures. */ > +static inline struct page_info *virt_to_page(const void *v) > +{ > + BUG(); > + return NULL; > +} > + > +/* > + * We define non-underscored wrappers for above conversion functions. > + * These are overriden in various source files while underscored version > + * remain intact. > + */ > +#define virt_to_mfn(va) __virt_to_mfn(va) > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) Is this really still needed? Would be pretty nice if in a new port we could get to start cleanly right away (i.e. by not needing per-file overrides, but using type-safe expansions here right away). > +struct page_info > +{ > + /* Each frame can be threaded onto a doubly-linked list. */ > + struct page_list_entry list; > + > + /* Reference count and various PGC_xxx flags and fields. */ > + unsigned long count_info; > + > + /* Context-dependent fields follow... */ > + union { > + /* Page is in use: ((count_info & PGC_count_mask) != 0). */ > + struct { > + /* Type reference count and various PGT_xxx flags and fields. */ > + unsigned long type_info; > + } inuse; > + /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */ > + union { > + struct { > + /* > + * Index of the first *possibly* unscrubbed page in the buddy. > + * One more bit than maximum possible order to accommodate > + * INVALID_DIRTY_IDX. > + */ > +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) > + unsigned long first_dirty:MAX_ORDER + 1; > + > + /* Do TLBs need flushing for safety before next page use? 
*/ > + bool need_tlbflush:1; > + > +#define BUDDY_NOT_SCRUBBING 0 > +#define BUDDY_SCRUBBING 1 > +#define BUDDY_SCRUB_ABORT 2 > + unsigned long scrub_state:2; > + }; > + > + unsigned long val; > + } free; Indentation is wrong (and thus misleading) for these two lines. > + > + } u; Nit: I don't see the value of the trailing blank line inside the union. > + union { > + /* Page is in use, but not as a shadow. */ I question the appicability of "shadow" here. > + struct { > + /* Owner of this page (zero if page is anonymous). */ > + struct domain *domain; > + } inuse; > + > + /* Page is on a free list. */ > + struct { > + /* Order-size of the free chunk this page is the head of. */ > + unsigned int order; > + } free; > + > + } v; > + > + union { > + /* > + * Timestamp from 'TLB clock', used to avoid extra safety flushes. > + * Only valid for: a) free pages, and b) pages with zero type count > + */ > + uint32_t tlbflush_timestamp; > + }; > + uint64_t pad; > +}; > + > +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) > + > +/* PDX of the first page in the frame table. */ > +extern unsigned long frametable_base_pdx; From this I conclude memory on RISC-V systems may not start at (or near) 0? > +/* Convert between machine frame numbers and page-info structures. */ > +#define mfn_to_page(mfn) \ > + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) > +#define page_to_mfn(pg) \ > + pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx) > + > +static inline void *page_to_virt(const struct page_info *pg) > +{ > + return mfn_to_virt(mfn_x(page_to_mfn(pg))); > +} > + > +/* > + * Common code requires get_page_type and put_page_type. > + * We don't care about typecounts so we just do the minimum to make it > + * happy. > + */ > +static inline int get_page_type(struct page_info *page, unsigned long type) > +{ > + return 1; > +} > + > +static inline void put_page_type(struct page_info *page) > +{ > +} > + > +static inline void put_page_and_type(struct page_info *page) > +{ > + put_page_type(page); > + put_page(page); > +} > + > +/* > + * RISC-V does not have an M2P, but common code expects a handful of > + * M2P-related defines and functions. Provide dummy versions of these. > + */ > +#define INVALID_M2P_ENTRY (~0UL) > +#define SHARED_M2P_ENTRY (~0UL - 1UL) > +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) > + > +/* Xen always owns P2M on RISC-V */ > +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0) Nit: Stray blank again after cast. > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) What's the relation of the comment with these two #define-s? > +#define PDX_GROUP_SHIFT (16 + 5) > + > +static inline unsigned long domain_get_maximum_gpfn(struct domain *d) > +{ > + BUG(); > + return 0; > +} > + > +static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) > +{ > + BUG(); > + return 0; > +} > + > +/* > + * On RISCV, all the RAM is currently direct mapped in Xen. > + * Hence return always true. > + */ > +static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr) > +{ > + return true; > +} > + > +#define PG_shift(idx) (BITS_PER_LONG - (idx)) > +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) > + > +#define PGT_none PG_mask(0, 1) /* no special uses of this page */ > +#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */ > +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */ > + > + /* Count of uses of this frame as its current type. 
*/ > +#define PGT_count_width PG_shift(2) > +#define PGT_count_mask ((1UL<<PGT_count_width)-1) Nit: Style (missing blanks around binary operators). Also a few more times further down. > +/* > + * Page needs to be scrubbed. Since this bit can only be set on a page that is > + * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit. > + */ > +#define _PGC_need_scrub _PGC_allocated > +#define PGC_need_scrub PGC_allocated > + > +// /* Cleared when the owning guest 'frees' this page. */ Why a commented out comment? > +#define _PGC_allocated PG_shift(1) > +#define PGC_allocated PG_mask(1, 1) > + /* Page is Xen heap? */ > +#define _PGC_xen_heap PG_shift(2) > +#define PGC_xen_heap PG_mask(1, 2) > +/* Page is broken? */ > +#define _PGC_broken PG_shift(7) > +#define PGC_broken PG_mask(1, 7) > + /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */ Can similar comments in this block please all be similarly indented (or not)? > +#define PGC_state PG_mask(3, 9) > +#define PGC_state_inuse PG_mask(0, 9) > +#define PGC_state_offlining PG_mask(1, 9) > +#define PGC_state_offlined PG_mask(2, 9) > +#define PGC_state_free PG_mask(3, 9) > +// #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) ??? > +/* Count of references to this frame. */ > +#define PGC_count_width PG_shift(9) > +#define PGC_count_mask ((1UL<<PGC_count_width)-1) > + > +#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) And here it then "properly" appears? > +#define _PGC_extra PG_shift(10) > +#define PGC_extra PG_mask(1, 10) > + > +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) > +#define is_xen_heap_mfn(mfn) \ > + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) > + > +#define is_xen_fixed_mfn(mfn) \ > + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ > + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) Why does _start need prefixing wuth & and _end prefixing with a cast? First and foremost both want to be consistent. And then preferably with as little extra clutter as possible. > +#define page_get_owner(_p) (_p)->v.inuse.domain > +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) > + > +/* TODO: implement */ > +#define mfn_valid(mfn) ({ (void) (mfn); 0; }) > + > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) This appeared further up already. > +#define domain_set_alloc_bitsize(d) ((void)0) Better ((void)(d)) ? And then ... > +#define domain_clamp_alloc_bitsize(d, b) (b) ... ((void)(d), (b)) here? Jan
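Taken together, the last two suggestions in the review above would amount to something like (sketch only):

    #define domain_set_alloc_bitsize(d)      ((void)(d))
    #define domain_clamp_alloc_bitsize(d, b) ((void)(d), (b))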
On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote: > On 22.12.2023 16:13, Oleksii Kurochko wrote: > > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com> > > --- > > Changes in V3: > > - update the commit message > > ??? (yet again) asm/mm.h was changed to mm.h > > > --- a/xen/arch/riscv/include/asm/mm.h > > +++ b/xen/arch/riscv/include/asm/mm.h > > @@ -3,8 +3,251 @@ > > #ifndef _ASM_RISCV_MM_H > > #define _ASM_RISCV_MM_H > > > > +#include <public/xen.h> > > +#include <xen/pdx.h> > > +#include <xen/types.h> > > + > > +#include <asm/page.h> > > #include <asm/page-bits.h> > > > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > > +#define vmap_to_mfn(va) > > maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > > Everything you have above ... > > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > > +#define vmap_to_mfn(va) > > maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > > ... appears a 2nd time right afterwards. Hmm, looks like rebase issue. I'll drop a copy. Thanks. > > > +#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK)) > > +#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | > > DIRECTMAP_VIRT_START)) > > + > > +/* Convert between Xen-heap virtual addresses and machine frame > > numbers. */ > > +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) > > +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << > > PAGE_SHIFT) > > These would imo better use maddr_to_mfn() and mfn_to_maddr(), rather > than > kind of open-coding them. The former could also use PFN_DOWN() as an > alternative. Thanks. I'll take that into account. > > > +/* Convert between Xen-heap virtual addresses and page-info > > structures. */ > > +static inline struct page_info *virt_to_page(const void *v) > > +{ > > + BUG(); > > + return NULL; > > +} > > + > > +/* > > + * We define non-underscored wrappers for above conversion > > functions. > > + * These are overriden in various source files while underscored > > version > > + * remain intact. > > + */ > > +#define virt_to_mfn(va) __virt_to_mfn(va) > > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) > > Is this really still needed? Would be pretty nice if in a new port we > could get to start cleanly right away (i.e. by not needing per-file > overrides, but using type-safe expansions here right away). Yes, we can just rename __virt_to_mfn and __mfn_to_virt and updated it accordingly to your previous comment. > > > +struct page_info > > +{ > > + /* Each frame can be threaded onto a doubly-linked list. */ > > + struct page_list_entry list; > > + > > + /* Reference count and various PGC_xxx flags and fields. */ > > + unsigned long count_info; > > + > > + /* Context-dependent fields follow... */ > > + union { > > + /* Page is in use: ((count_info & PGC_count_mask) != 0). > > */ > > + struct { > > + /* Type reference count and various PGT_xxx flags and > > fields. */ > > + unsigned long type_info; > > + } inuse; > > + /* Page is on a free list: ((count_info & PGC_count_mask) > > == 0). 
*/ > > + union { > > + struct { > > + /* > > + * Index of the first *possibly* unscrubbed page > > in the buddy. > > + * One more bit than maximum possible order to > > accommodate > > + * INVALID_DIRTY_IDX. > > + */ > > +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) > > + unsigned long first_dirty:MAX_ORDER + 1; > > + > > + /* Do TLBs need flushing for safety before next > > page use? */ > > + bool need_tlbflush:1; > > + > > +#define BUDDY_NOT_SCRUBBING 0 > > +#define BUDDY_SCRUBBING 1 > > +#define BUDDY_SCRUB_ABORT 2 > > + unsigned long scrub_state:2; > > + }; > > + > > + unsigned long val; > > + } free; > > Indentation is wrong (and thus misleading) for these two lines. I'll correct it. > > > + > > + } u; > > Nit: I don't see the value of the trailing blank line inside the > union. Sure, there is no any sense. > > > + union { > > + /* Page is in use, but not as a shadow. */ > > I question the appicability of "shadow" here. > > > + struct { > > + /* Owner of this page (zero if page is anonymous). */ > > + struct domain *domain; > > + } inuse; > > + > > + /* Page is on a free list. */ > > + struct { > > + /* Order-size of the free chunk this page is the head > > of. */ > > + unsigned int order; > > + } free; > > + > > + } v; > > + > > + union { > > + /* > > + * Timestamp from 'TLB clock', used to avoid extra safety > > flushes. > > + * Only valid for: a) free pages, and b) pages with zero > > type count > > + */ > > + uint32_t tlbflush_timestamp; > > + }; > > + uint64_t pad; > > +}; > > + > > +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) > > + > > +/* PDX of the first page in the frame table. */ > > +extern unsigned long frametable_base_pdx; > > From this I conclude memory on RISC-V systems may not start at (or > near) 0? I am not sure that it is impossible at all, but all platforms I saw it was always not 0 and pretty big values. For example, on real platform, there is =0000004000000000. In QEMU, it is 0x800...0 > > > +/* Convert between machine frame numbers and page-info structures. > > */ > > +#define > > mfn_to_page(mfn) \ > > + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) > > +#define > > page_to_mfn(pg) \ > > + pdx_to_mfn((unsigned long)((pg) - frame_table) + > > frametable_base_pdx) > > + > > +static inline void *page_to_virt(const struct page_info *pg) > > +{ > > + return mfn_to_virt(mfn_x(page_to_mfn(pg))); > > +} > > + > > +/* > > + * Common code requires get_page_type and put_page_type. > > + * We don't care about typecounts so we just do the minimum to > > make it > > + * happy. > > + */ > > +static inline int get_page_type(struct page_info *page, unsigned > > long type) > > +{ > > + return 1; > > +} > > + > > +static inline void put_page_type(struct page_info *page) > > +{ > > +} > > + > > +static inline void put_page_and_type(struct page_info *page) > > +{ > > + put_page_type(page); > > + put_page(page); > > +} > > + > > +/* > > + * RISC-V does not have an M2P, but common code expects a handful > > of > > + * M2P-related defines and functions. Provide dummy versions of > > these. > > + */ > > +#define INVALID_M2P_ENTRY (~0UL) > > +#define SHARED_M2P_ENTRY (~0UL - 1UL) > > +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) > > + > > +/* Xen always owns P2M on RISC-V */ > > +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), > > (void)(pfn); } while (0) > > Nit: Stray blank again after cast. I'll update this. Thanks. 
> > > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > > What's the relation of the comment with these two #define-s? I don't know, it was copied just from Arm. I think it would be better to drop a comment and just define macros as BUG_ON("uimplemented") for time being. > > > +#define PDX_GROUP_SHIFT (16 + 5) > > + > > +static inline unsigned long domain_get_maximum_gpfn(struct domain > > *d) > > +{ > > + BUG(); > > + return 0; > > +} > > + > > +static inline long arch_memory_op(int op, > > XEN_GUEST_HANDLE_PARAM(void) arg) > > +{ > > + BUG(); > > + return 0; > > +} > > + > > +/* > > + * On RISCV, all the RAM is currently direct mapped in Xen. > > + * Hence return always true. > > + */ > > +static inline bool arch_mfns_in_directmap(unsigned long mfn, > > unsigned long nr) > > +{ > > + return true; > > +} > > + > > +#define PG_shift(idx) (BITS_PER_LONG - (idx)) > > +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) > > + > > +#define PGT_none PG_mask(0, 1) /* no special uses of > > this page */ > > +#define PGT_writable_page PG_mask(1, 1) /* has writable > > mappings? */ > > +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or > > 63. */ > > + > > + /* Count of uses of this frame as its current type. */ > > +#define PGT_count_width PG_shift(2) > > +#define PGT_count_mask ((1UL<<PGT_count_width)-1) > > Nit: Style (missing blanks around binary operators). Also a few more > times further down. Thanks. I'll update. > > > +/* > > + * Page needs to be scrubbed. Since this bit can only be set on a > > page that is > > + * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit. > > + */ > > +#define _PGC_need_scrub _PGC_allocated > > +#define PGC_need_scrub PGC_allocated > > + > > +// /* Cleared when the owning guest 'frees' this page. */ > > Why a commented out comment? Missed to remove, my IDE using this type of comment by default. and I commented all the file when tried to find minimal of changes needed for Xen build. > > > +#define _PGC_allocated PG_shift(1) > > +#define PGC_allocated PG_mask(1, 1) > > + /* Page is Xen heap? */ > > +#define _PGC_xen_heap PG_shift(2) > > +#define PGC_xen_heap PG_mask(1, 2) > > +/* Page is broken? */ > > +#define _PGC_broken PG_shift(7) > > +#define PGC_broken PG_mask(1, 7) > > + /* Mutually-exclusive page states: { inuse, offlining, offlined, > > free }. */ > > Can similar comments in this block please all be similarly indented > (or not)? Sure. I'll update that. > > > +#define PGC_state PG_mask(3, 9) > > +#define PGC_state_inuse PG_mask(0, 9) > > +#define PGC_state_offlining PG_mask(1, 9) > > +#define PGC_state_offlined PG_mask(2, 9) > > +#define PGC_state_free PG_mask(3, 9) > > +// #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > > PGC_state_##st) > > ??? The same as above, just missed to remove that line. > > > +/* Count of references to this frame. */ > > +#define PGC_count_width PG_shift(9) > > +#define PGC_count_mask ((1UL<<PGC_count_width)-1) > > + > > +#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > > PGC_state_##st) > > And here it then "properly" appears? 
> > > +#define _PGC_extra PG_shift(10) > > +#define PGC_extra PG_mask(1, 10) > > + > > +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) > > +#define is_xen_heap_mfn(mfn) \ > > + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) > > + > > +#define is_xen_fixed_mfn(mfn) \ > > + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ > > + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) > > Why does _start need prefixing wuth & and _end prefixing with a cast? > First and foremost both want to be consistent. And then preferably > with as little extra clutter as possible. This is how it was defined in Arm. I think it both can be casted. I'll update that. Thanks. > > > +#define page_get_owner(_p) (_p)->v.inuse.domain > > +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) > > + > > +/* TODO: implement */ > > +#define mfn_valid(mfn) ({ (void) (mfn); 0; }) > > + > > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > > This appeared further up already. > > > +#define domain_set_alloc_bitsize(d) ((void)0) > > Better ((void)(d)) ? And then ... > > > +#define domain_clamp_alloc_bitsize(d, b) (b) > > ... ((void)(d), (b)) here? I'll update properly. Thanks. ~ Oleksii
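For the style remark about blanks around binary operators, the adjusted count masks would read roughly (sketch only, same values as in the patch):

    #define PGT_count_width  PG_shift(2)
    #define PGT_count_mask   ((1UL << PGT_count_width) - 1)

    #define PGC_count_width  PG_shift(9)
    #define PGC_count_mask   ((1UL << PGC_count_width) - 1)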
On 23.01.2024 18:27, Oleksii wrote:
> On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote:
>> On 22.12.2023 16:13, Oleksii Kurochko wrote:
>>> +#define _PGC_extra        PG_shift(10)
>>> +#define PGC_extra         PG_mask(1, 10)
>>> +
>>> +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
>>> +#define is_xen_heap_mfn(mfn) \
>>> +    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
>>> +
>>> +#define is_xen_fixed_mfn(mfn) \
>>> +    ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \
>>> +     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))
>>
>> Why does _start need prefixing with & and _end prefixing with a cast?
>> First and foremost both want to be consistent. And then preferably
>> with as little extra clutter as possible.
> This is how it was defined in Arm. I think both can be cast.
> I'll update that.

Judging from your present use of virt_to_maddr(&_start), I'd assume
you're fine without casts. And when casts aren't needed, they're better
avoided.

Jan
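A cast-free variant along the lines discussed above might look as follows (sketch only, assuming _start and _end are declared as character arrays, e.g. "extern char _start[], _end[];", so both decay to pointers without extra decoration):

    #define is_xen_fixed_mfn(mfn)                            \
        ((mfn_to_maddr(mfn) >= virt_to_maddr(_start)) &&     \
         (mfn_to_maddr(mfn) <= virt_to_maddr(_end - 1)))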
On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote: > On 22.12.2023 16:13, Oleksii Kurochko wrote: > > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com> > > --- > > Changes in V3: > > - update the commit message > > ??? (yet again) > > > --- a/xen/arch/riscv/include/asm/mm.h > > +++ b/xen/arch/riscv/include/asm/mm.h > > @@ -3,8 +3,251 @@ > > #ifndef _ASM_RISCV_MM_H > > #define _ASM_RISCV_MM_H > > > > +#include <public/xen.h> > > +#include <xen/pdx.h> > > +#include <xen/types.h> > > + > > +#include <asm/page.h> > > #include <asm/page-bits.h> > > > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > > +#define vmap_to_mfn(va) > > maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > > Everything you have above ... > > > +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) > > +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) > > +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) > > +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) > > +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) > > +#define vmap_to_mfn(va) > > maddr_to_mfn(virt_to_maddr((vaddr_t)va)) > > +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) > > ... appears a 2nd time right afterwards. > > > +#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK)) > > +#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | > > DIRECTMAP_VIRT_START)) > > + > > +/* Convert between Xen-heap virtual addresses and machine frame > > numbers. */ > > +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) > > +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << > > PAGE_SHIFT) > > These would imo better use maddr_to_mfn() and mfn_to_maddr(), rather > than > kind of open-coding them. The former could also use PFN_DOWN() as an > alternative. We can't to as __virt_to_mfn() when is used it is usually wrapped by _mfn() which expect to have unsigned long as an argument. > > > +/* Convert between Xen-heap virtual addresses and page-info > > structures. */ > > +static inline struct page_info *virt_to_page(const void *v) > > +{ > > + BUG(); > > + return NULL; > > +} > > + > > +/* > > + * We define non-underscored wrappers for above conversion > > functions. > > + * These are overriden in various source files while underscored > > version > > + * remain intact. > > + */ > > +#define virt_to_mfn(va) __virt_to_mfn(va) > > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) > > Is this really still needed? Would be pretty nice if in a new port we > could get to start cleanly right away (i.e. by not needing per-file > overrides, but using type-safe expansions here right away). We still need __virt_to_mfn and __mfn_to_virt as common code use them: * xen/common/xenoprof.c:24:#define virt_to_mfn(va) mfn(__virt_to_mfn(va)) * xen/include/xen/domain_page.h:59:#define domain_page_map_to_mfn(ptr) _mfn(__virt_to_mfn((unsigned long)(ptr))) ~ Oleksii > > > +struct page_info > > +{ > > + /* Each frame can be threaded onto a doubly-linked list. */ > > + struct page_list_entry list; > > + > > + /* Reference count and various PGC_xxx flags and fields. */ > > + unsigned long count_info; > > + > > + /* Context-dependent fields follow... */ > > + union { > > + /* Page is in use: ((count_info & PGC_count_mask) != 0). 
> > */ > > + struct { > > + /* Type reference count and various PGT_xxx flags and > > fields. */ > > + unsigned long type_info; > > + } inuse; > > + /* Page is on a free list: ((count_info & PGC_count_mask) > > == 0). */ > > + union { > > + struct { > > + /* > > + * Index of the first *possibly* unscrubbed page > > in the buddy. > > + * One more bit than maximum possible order to > > accommodate > > + * INVALID_DIRTY_IDX. > > + */ > > +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) > > + unsigned long first_dirty:MAX_ORDER + 1; > > + > > + /* Do TLBs need flushing for safety before next > > page use? */ > > + bool need_tlbflush:1; > > + > > +#define BUDDY_NOT_SCRUBBING 0 > > +#define BUDDY_SCRUBBING 1 > > +#define BUDDY_SCRUB_ABORT 2 > > + unsigned long scrub_state:2; > > + }; > > + > > + unsigned long val; > > + } free; > > Indentation is wrong (and thus misleading) for these two lines. > > > + > > + } u; > > Nit: I don't see the value of the trailing blank line inside the > union. > > > + union { > > + /* Page is in use, but not as a shadow. */ > > I question the appicability of "shadow" here. > > > + struct { > > + /* Owner of this page (zero if page is anonymous). */ > > + struct domain *domain; > > + } inuse; > > + > > + /* Page is on a free list. */ > > + struct { > > + /* Order-size of the free chunk this page is the head > > of. */ > > + unsigned int order; > > + } free; > > + > > + } v; > > + > > + union { > > + /* > > + * Timestamp from 'TLB clock', used to avoid extra safety > > flushes. > > + * Only valid for: a) free pages, and b) pages with zero > > type count > > + */ > > + uint32_t tlbflush_timestamp; > > + }; > > + uint64_t pad; > > +}; > > + > > +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) > > + > > +/* PDX of the first page in the frame table. */ > > +extern unsigned long frametable_base_pdx; > > From this I conclude memory on RISC-V systems may not start at (or > near) 0? > > > +/* Convert between machine frame numbers and page-info structures. > > */ > > +#define > > mfn_to_page(mfn) \ > > + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) > > +#define > > page_to_mfn(pg) \ > > + pdx_to_mfn((unsigned long)((pg) - frame_table) + > > frametable_base_pdx) > > + > > +static inline void *page_to_virt(const struct page_info *pg) > > +{ > > + return mfn_to_virt(mfn_x(page_to_mfn(pg))); > > +} > > + > > +/* > > + * Common code requires get_page_type and put_page_type. > > + * We don't care about typecounts so we just do the minimum to > > make it > > + * happy. > > + */ > > +static inline int get_page_type(struct page_info *page, unsigned > > long type) > > +{ > > + return 1; > > +} > > + > > +static inline void put_page_type(struct page_info *page) > > +{ > > +} > > + > > +static inline void put_page_and_type(struct page_info *page) > > +{ > > + put_page_type(page); > > + put_page(page); > > +} > > + > > +/* > > + * RISC-V does not have an M2P, but common code expects a handful > > of > > + * M2P-related defines and functions. Provide dummy versions of > > these. > > + */ > > +#define INVALID_M2P_ENTRY (~0UL) > > +#define SHARED_M2P_ENTRY (~0UL - 1UL) > > +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) > > + > > +/* Xen always owns P2M on RISC-V */ > > +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), > > (void)(pfn); } while (0) > > Nit: Stray blank again after cast. > > > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > > What's the relation of the comment with these two #define-s? 
> > > +#define PDX_GROUP_SHIFT (16 + 5) > > + > > +static inline unsigned long domain_get_maximum_gpfn(struct domain > > *d) > > +{ > > + BUG(); > > + return 0; > > +} > > + > > +static inline long arch_memory_op(int op, > > XEN_GUEST_HANDLE_PARAM(void) arg) > > +{ > > + BUG(); > > + return 0; > > +} > > + > > +/* > > + * On RISCV, all the RAM is currently direct mapped in Xen. > > + * Hence return always true. > > + */ > > +static inline bool arch_mfns_in_directmap(unsigned long mfn, > > unsigned long nr) > > +{ > > + return true; > > +} > > + > > +#define PG_shift(idx) (BITS_PER_LONG - (idx)) > > +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) > > + > > +#define PGT_none PG_mask(0, 1) /* no special uses of > > this page */ > > +#define PGT_writable_page PG_mask(1, 1) /* has writable > > mappings? */ > > +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or > > 63. */ > > + > > + /* Count of uses of this frame as its current type. */ > > +#define PGT_count_width PG_shift(2) > > +#define PGT_count_mask ((1UL<<PGT_count_width)-1) > > Nit: Style (missing blanks around binary operators). Also a few more > times further down. > > > +/* > > + * Page needs to be scrubbed. Since this bit can only be set on a > > page that is > > + * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit. > > + */ > > +#define _PGC_need_scrub _PGC_allocated > > +#define PGC_need_scrub PGC_allocated > > + > > +// /* Cleared when the owning guest 'frees' this page. */ > > Why a commented out comment? > > > +#define _PGC_allocated PG_shift(1) > > +#define PGC_allocated PG_mask(1, 1) > > + /* Page is Xen heap? */ > > +#define _PGC_xen_heap PG_shift(2) > > +#define PGC_xen_heap PG_mask(1, 2) > > +/* Page is broken? */ > > +#define _PGC_broken PG_shift(7) > > +#define PGC_broken PG_mask(1, 7) > > + /* Mutually-exclusive page states: { inuse, offlining, offlined, > > free }. */ > > Can similar comments in this block please all be similarly indented > (or not)? > > > +#define PGC_state PG_mask(3, 9) > > +#define PGC_state_inuse PG_mask(0, 9) > > +#define PGC_state_offlining PG_mask(1, 9) > > +#define PGC_state_offlined PG_mask(2, 9) > > +#define PGC_state_free PG_mask(3, 9) > > +// #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > > PGC_state_##st) > > ??? > > > +/* Count of references to this frame. */ > > +#define PGC_count_width PG_shift(9) > > +#define PGC_count_mask ((1UL<<PGC_count_width)-1) > > + > > +#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == > > PGC_state_##st) > > And here it then "properly" appears? > > > +#define _PGC_extra PG_shift(10) > > +#define PGC_extra PG_mask(1, 10) > > + > > +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) > > +#define is_xen_heap_mfn(mfn) \ > > + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) > > + > > +#define is_xen_fixed_mfn(mfn) \ > > + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ > > + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) > > Why does _start need prefixing wuth & and _end prefixing with a cast? > First and foremost both want to be consistent. And then preferably > with as little extra clutter as possible. > > > +#define page_get_owner(_p) (_p)->v.inuse.domain > > +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) > > + > > +/* TODO: implement */ > > +#define mfn_valid(mfn) ({ (void) (mfn); 0; }) > > + > > +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) > > This appeared further up already. 
> > > +#define domain_set_alloc_bitsize(d) ((void)0) > > Better ((void)(d)) ? And then ... > > > +#define domain_clamp_alloc_bitsize(d, b) (b) > > ... ((void)(d), (b)) here? > > Jan
On 02.02.2024 18:30, Oleksii wrote:
> On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote:
>> On 22.12.2023 16:13, Oleksii Kurochko wrote:
>>> +/* Convert between Xen-heap virtual addresses and machine frame numbers. */
>>> +#define __virt_to_mfn(va)  (virt_to_maddr(va) >> PAGE_SHIFT)
>>> +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT)
>>
>> These would imo better use maddr_to_mfn() and mfn_to_maddr(), rather
>> than kind of open-coding them. The former could also use PFN_DOWN()
>> as an alternative.
> We can't, as when __virt_to_mfn() is used it is usually wrapped by
> _mfn(), which expects an unsigned long argument.

    #define __virt_to_mfn(va)  mfn_x(maddr_to_mfn(virt_to_maddr(va)))
    #define __mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(_mfn(mfn)))

?

>>> +/* Convert between Xen-heap virtual addresses and page-info structures. */
>>> +static inline struct page_info *virt_to_page(const void *v)
>>> +{
>>> +    BUG();
>>> +    return NULL;
>>> +}
>>> +
>>> +/*
>>> + * We define non-underscored wrappers for above conversion functions.
>>> + * These are overridden in various source files while underscored versions
>>> + * remain intact.
>>> + */
>>> +#define virt_to_mfn(va)    __virt_to_mfn(va)
>>> +#define mfn_to_virt(mfn)   __mfn_to_virt(mfn)
>>
>> Is this really still needed? Would be pretty nice if in a new port we
>> could get to start cleanly right away (i.e. by not needing per-file
>> overrides, but using type-safe expansions here right away).
> We still need __virt_to_mfn and __mfn_to_virt as common code uses them:
> * xen/common/xenoprof.c:24:#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

Are you meaning to enable this code on RISC-V?

> * xen/include/xen/domain_page.h:59:#define domain_page_map_to_mfn(ptr) _mfn(__virt_to_mfn((unsigned long)(ptr)))

Hmm, yes, we should finally get this sorted. But yeah, not something I
want to ask you to do.

Jan
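The PFN_DOWN() alternative mentioned in the review could be spelled out roughly as follows (sketch only; PFN_DOWN() is assumed to come from xen/pfn.h):

    #define __virt_to_mfn(va)  PFN_DOWN(virt_to_maddr(va))
    #define __mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(_mfn(mfn)))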
On Mon, 2024-02-05 at 08:46 +0100, Jan Beulich wrote: > On 02.02.2024 18:30, Oleksii wrote: > > On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote: > > > On 22.12.2023 16:13, Oleksii Kurochko wrote: > > > > +/* Convert between Xen-heap virtual addresses and machine > > > > frame > > > > numbers. */ > > > > +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) > > > > +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << > > > > PAGE_SHIFT) > > > > > > These would imo better use maddr_to_mfn() and mfn_to_maddr(), > > > rather > > > than > > > kind of open-coding them. The former could also use PFN_DOWN() as > > > an > > > alternative. > > We can't to as __virt_to_mfn() when is used it is usually wrapped > > by > > _mfn() which expect to have unsigned long as an argument. > > #define __virt_to_mfn(va) mfn_x(maddr_to_mfn(virt_to_maddr(va)) > #define __mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(_mfn(mfn))) > > ? I had an issue with the compilation when I tried to define them in similar way. I'll double check again. > > > > > +/* Convert between Xen-heap virtual addresses and page-info > > > > structures. */ > > > > +static inline struct page_info *virt_to_page(const void *v) > > > > +{ > > > > + BUG(); > > > > + return NULL; > > > > +} > > > > + > > > > +/* > > > > + * We define non-underscored wrappers for above conversion > > > > functions. > > > > + * These are overriden in various source files while > > > > underscored > > > > version > > > > + * remain intact. > > > > + */ > > > > +#define virt_to_mfn(va) __virt_to_mfn(va) > > > > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) > > > > > > Is this really still needed? Would be pretty nice if in a new > > > port we > > > could get to start cleanly right away (i.e. by not needing per- > > > file > > > overrides, but using type-safe expansions here right away). > > We still need __virt_to_mfn and __mfn_to_virt as common code use > > them: > > * xen/common/xenoprof.c:24:#define virt_to_mfn(va) > > mfn(__virt_to_mfn(va)) > > Are you meaning to enable this code on RISC-V. Yes, that is what I meant. ~ Oleksii > > > * xen/include/xen/domain_page.h:59:#define > > domain_page_map_to_mfn(ptr) > > _mfn(__virt_to_mfn((unsigned long)(ptr))) > > Hmm, yes, we should finally get this sorted. But yeah, not something > I want > to ask you to do. > > Jan
On 05.02.2024 13:49, Oleksii wrote: > On Mon, 2024-02-05 at 08:46 +0100, Jan Beulich wrote: >> On 02.02.2024 18:30, Oleksii wrote: >>> On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote: >>>> On 22.12.2023 16:13, Oleksii Kurochko wrote: >>>>> +/* Convert between Xen-heap virtual addresses and page-info >>>>> structures. */ >>>>> +static inline struct page_info *virt_to_page(const void *v) >>>>> +{ >>>>> + BUG(); >>>>> + return NULL; >>>>> +} >>>>> + >>>>> +/* >>>>> + * We define non-underscored wrappers for above conversion >>>>> functions. >>>>> + * These are overriden in various source files while >>>>> underscored >>>>> version >>>>> + * remain intact. >>>>> + */ >>>>> +#define virt_to_mfn(va) __virt_to_mfn(va) >>>>> +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) >>>> >>>> Is this really still needed? Would be pretty nice if in a new >>>> port we >>>> could get to start cleanly right away (i.e. by not needing per- >>>> file >>>> overrides, but using type-safe expansions here right away). >>> We still need __virt_to_mfn and __mfn_to_virt as common code use >>> them: >>> * xen/common/xenoprof.c:24:#define virt_to_mfn(va) >>> mfn(__virt_to_mfn(va)) >> >> Are you meaning to enable this code on RISC-V. > Yes, that is what I meant. And why would you do that? Even Arm doesn't use it, and I'd expect no newer port would care about this very old interface. Jan
On Mon, 2024-02-05 at 15:05 +0100, Jan Beulich wrote: > On 05.02.2024 13:49, Oleksii wrote: > > On Mon, 2024-02-05 at 08:46 +0100, Jan Beulich wrote: > > > On 02.02.2024 18:30, Oleksii wrote: > > > > On Tue, 2024-01-23 at 14:03 +0100, Jan Beulich wrote: > > > > > On 22.12.2023 16:13, Oleksii Kurochko wrote: > > > > > > +/* Convert between Xen-heap virtual addresses and page- > > > > > > info > > > > > > structures. */ > > > > > > +static inline struct page_info *virt_to_page(const void > > > > > > *v) > > > > > > +{ > > > > > > + BUG(); > > > > > > + return NULL; > > > > > > +} > > > > > > + > > > > > > +/* > > > > > > + * We define non-underscored wrappers for above conversion > > > > > > functions. > > > > > > + * These are overriden in various source files while > > > > > > underscored > > > > > > version > > > > > > + * remain intact. > > > > > > + */ > > > > > > +#define virt_to_mfn(va) __virt_to_mfn(va) > > > > > > +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) > > > > > > > > > > Is this really still needed? Would be pretty nice if in a new > > > > > port we > > > > > could get to start cleanly right away (i.e. by not needing > > > > > per- > > > > > file > > > > > overrides, but using type-safe expansions here right away). > > > > We still need __virt_to_mfn and __mfn_to_virt as common code > > > > use > > > > them: > > > > * xen/common/xenoprof.c:24:#define virt_to_mfn(va) > > > > mfn(__virt_to_mfn(va)) > > > > > > Are you meaning to enable this code on RISC-V. > > Yes, that is what I meant. > > And why would you do that? Even Arm doesn't use it, and I'd expect no > newer port would care about this very old interface. Oh, sorry, I read your question wrongly. I am not going to enable that config. ~ Oleksii
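If the per-file overrides were dropped, the type-safe expansions referred to above could be provided directly; a possible sketch (not what the patch currently does), relying on the maddr_to_mfn()/mfn_to_maddr() helpers defined earlier in this header:

    /* Hypothetical type-safe wrappers; virt_to_mfn() would then yield mfn_t directly. */
    #define virt_to_mfn(va)   maddr_to_mfn(virt_to_maddr(va))
    #define mfn_to_virt(mfn)  maddr_to_virt(mfn_to_maddr(mfn))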
diff --git a/xen/arch/riscv/include/asm/config.h b/xen/arch/riscv/include/asm/config.h index fb9fc9daaa..400309f4ef 100644 --- a/xen/arch/riscv/include/asm/config.h +++ b/xen/arch/riscv/include/asm/config.h @@ -67,6 +67,8 @@ #define XEN_VIRT_START 0xFFFFFFFFC0000000 /* (_AC(-1, UL) + 1 - GB(1)) */ +#define DIRECTMAP_VIRT_START SLOTN(200) + #define FRAMETABLE_VIRT_START SLOTN(196) #define FRAMETABLE_SIZE GB(3) #define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h index 57026e134d..14fce72fde 100644 --- a/xen/arch/riscv/include/asm/mm.h +++ b/xen/arch/riscv/include/asm/mm.h @@ -3,8 +3,251 @@ #ifndef _ASM_RISCV_MM_H #define _ASM_RISCV_MM_H +#include <public/xen.h> +#include <xen/pdx.h> +#include <xen/types.h> + +#include <asm/page.h> #include <asm/page-bits.h> +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) + +#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK)) +#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | DIRECTMAP_VIRT_START)) + +/* Convert between Xen-heap virtual addresses and machine frame numbers. */ +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) +#define __mfn_to_virt(mfn) maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT) + +/* Convert between Xen-heap virtual addresses and page-info structures. */ +static inline struct page_info *virt_to_page(const void *v) +{ + BUG(); + return NULL; +} + +/* + * We define non-underscored wrappers for above conversion functions. + * These are overriden in various source files while underscored version + * remain intact. + */ +#define virt_to_mfn(va) __virt_to_mfn(va) +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) + +struct page_info +{ + /* Each frame can be threaded onto a doubly-linked list. */ + struct page_list_entry list; + + /* Reference count and various PGC_xxx flags and fields. */ + unsigned long count_info; + + /* Context-dependent fields follow... */ + union { + /* Page is in use: ((count_info & PGC_count_mask) != 0). */ + struct { + /* Type reference count and various PGT_xxx flags and fields. */ + unsigned long type_info; + } inuse; + /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */ + union { + struct { + /* + * Index of the first *possibly* unscrubbed page in the buddy. + * One more bit than maximum possible order to accommodate + * INVALID_DIRTY_IDX. + */ +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) + unsigned long first_dirty:MAX_ORDER + 1; + + /* Do TLBs need flushing for safety before next page use? */ + bool need_tlbflush:1; + +#define BUDDY_NOT_SCRUBBING 0 +#define BUDDY_SCRUBBING 1 +#define BUDDY_SCRUB_ABORT 2 + unsigned long scrub_state:2; + }; + + unsigned long val; + } free; + + } u; + + union { + /* Page is in use, but not as a shadow. 
*/ + struct { + /* Owner of this page (zero if page is anonymous). */ + struct domain *domain; + } inuse; + + /* Page is on a free list. */ + struct { + /* Order-size of the free chunk this page is the head of. */ + unsigned int order; + } free; + + } v; + + union { + /* + * Timestamp from 'TLB clock', used to avoid extra safety flushes. + * Only valid for: a) free pages, and b) pages with zero type count + */ + uint32_t tlbflush_timestamp; + }; + uint64_t pad; +}; + +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) + +/* PDX of the first page in the frame table. */ +extern unsigned long frametable_base_pdx; + +/* Convert between machine frame numbers and page-info structures. */ +#define mfn_to_page(mfn) \ + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) +#define page_to_mfn(pg) \ + pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx) + +static inline void *page_to_virt(const struct page_info *pg) +{ + return mfn_to_virt(mfn_x(page_to_mfn(pg))); +} + +/* + * Common code requires get_page_type and put_page_type. + * We don't care about typecounts so we just do the minimum to make it + * happy. + */ +static inline int get_page_type(struct page_info *page, unsigned long type) +{ + return 1; +} + +static inline void put_page_type(struct page_info *page) +{ +} + +static inline void put_page_and_type(struct page_info *page) +{ + put_page_type(page); + put_page(page); +} + +/* + * RISC-V does not have an M2P, but common code expects a handful of + * M2P-related defines and functions. Provide dummy versions of these. + */ +#define INVALID_M2P_ENTRY (~0UL) +#define SHARED_M2P_ENTRY (~0UL - 1UL) +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) + +/* Xen always owns P2M on RISC-V */ +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0) +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) + +#define PDX_GROUP_SHIFT (16 + 5) + +static inline unsigned long domain_get_maximum_gpfn(struct domain *d) +{ + BUG(); + return 0; +} + +static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) +{ + BUG(); + return 0; +} + +/* + * On RISCV, all the RAM is currently direct mapped in Xen. + * Hence return always true. + */ +static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr) +{ + return true; +} + +#define PG_shift(idx) (BITS_PER_LONG - (idx)) +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) + +#define PGT_none PG_mask(0, 1) /* no special uses of this page */ +#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */ +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */ + + /* Count of uses of this frame as its current type. */ +#define PGT_count_width PG_shift(2) +#define PGT_count_mask ((1UL<<PGT_count_width)-1) + +/* + * Page needs to be scrubbed. Since this bit can only be set on a page that is + * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit. + */ +#define _PGC_need_scrub _PGC_allocated +#define PGC_need_scrub PGC_allocated + +// /* Cleared when the owning guest 'frees' this page. */ +#define _PGC_allocated PG_shift(1) +#define PGC_allocated PG_mask(1, 1) + /* Page is Xen heap? */ +#define _PGC_xen_heap PG_shift(2) +#define PGC_xen_heap PG_mask(1, 2) +/* Page is broken? */ +#define _PGC_broken PG_shift(7) +#define PGC_broken PG_mask(1, 7) + /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. 
*/ +#define PGC_state PG_mask(3, 9) +#define PGC_state_inuse PG_mask(0, 9) +#define PGC_state_offlining PG_mask(1, 9) +#define PGC_state_offlined PG_mask(2, 9) +#define PGC_state_free PG_mask(3, 9) +// #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) + +/* Count of references to this frame. */ +#define PGC_count_width PG_shift(9) +#define PGC_count_mask ((1UL<<PGC_count_width)-1) + +#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) + +#define _PGC_extra PG_shift(10) +#define PGC_extra PG_mask(1, 10) + +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) +#define is_xen_heap_mfn(mfn) \ + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) + +#define is_xen_fixed_mfn(mfn) \ + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) + +#define page_get_owner(_p) (_p)->v.inuse.domain +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) + +/* TODO: implement */ +#define mfn_valid(mfn) ({ (void) (mfn); 0; }) + +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) + +#define domain_set_alloc_bitsize(d) ((void)0) +#define domain_clamp_alloc_bitsize(d, b) (b) + +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) + extern unsigned char cpu0_boot_stack[]; void setup_initial_pagetables(void); @@ -17,4 +260,9 @@ unsigned long calc_phys_offset(void); void turn_on_mmu(unsigned long ra); +static inline unsigned int arch_get_dma_bitsize(void) +{ + return 32; /* TODO */ +} + #endif /* _ASM_RISCV_MM_H */
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V3:
 - update the commit message
 - introduce DIRECTMAP_VIRT_START.
 - drop changes related to pfn_to_paddr() and paddr_to_pfn() as they were
   removed in [PATCH v2 32/39] xen/riscv: add minimal stuff to asm/page.h
   to build full Xen
 - code style fixes.
 - drop get_page_nr and put_page_nr as they aren't needed for the time being
 - drop CONFIG_STATIC_MEMORY related things
 - code style fixes
---
Changes in V2:
 - define stub for arch_get_dma_bitsize(void)
---
 xen/arch/riscv/include/asm/config.h |   2 +
 xen/arch/riscv/include/asm/mm.h     | 248 ++++++++++++++++++++++++++++
 2 files changed, 250 insertions(+)
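As a closing illustration of the direct-map helpers added by this patch, a round trip through virt_to_maddr()/maddr_to_virt(); this only holds for addresses that really lie in the direct map and assumes DIRECTMAP_VIRT_START shares no bits with PADDR_MASK:

    /* Illustration only; va must be a direct-map virtual address. */
    static inline void directmap_roundtrip_check(const void *va)
    {
        paddr_t pa = virt_to_maddr(va);     /* (vaddr_t)va & PADDR_MASK  */

        ASSERT(maddr_to_virt(pa) == va);    /* pa | DIRECTMAP_VIRT_START */
    }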