Message ID | 035f63f2b92b963f2585064fa21e09e73157f9d3.1717410850.git.Sergiy_Kibrik@epam.com
---|---
State | New, archived |
Series | x86: make cpu virtualization support configurable
On 03.06.2024 13:13, Sergiy Kibrik wrote:
> --- a/xen/arch/x86/include/asm/p2m.h
> +++ b/xen/arch/x86/include/asm/p2m.h
> @@ -577,10 +577,10 @@ static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn)
>          return _gfn(mfn_x(mfn));
>  }
>  
> -#ifdef CONFIG_HVM
>  #define AP2MGET_prepopulate true
>  #define AP2MGET_query false
>  
> +#ifdef CONFIG_ALTP2M
>  /*
>   * Looks up altp2m entry. If the entry is not found it looks up the entry in
>   * hostp2m.

In principle this #ifdef shouldn't need moving. It's just that the
three use sites need taking care of a little differently. E.g. ...

> @@ -589,6 +589,15 @@ static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn)
>  int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
>                                 p2m_type_t *t, p2m_access_t *a,
>                                 bool prepopulate);
> +#else
> +static inline int altp2m_get_effective_entry(struct p2m_domain *ap2m,
> +                                             gfn_t gfn, mfn_t *mfn,
> +                                             p2m_type_t *t, p2m_access_t *a,
> +                                             bool prepopulate)
> +{
> +    ASSERT_UNREACHABLE();
> +    return -EOPNOTSUPP;
> +}

static inline int altp2m_get_effective_entry(struct p2m_domain *ap2m,
                                             gfn_t gfn, mfn_t *mfn,
                                             p2m_type_t *t, p2m_access_t *a)
{
    ASSERT_UNREACHABLE();
    return -EOPNOTSUPP;
}
#define altp2m_get_effective_entry(ap2m, gfn, mfn, t, a, prepopulate) \
    altp2m_get_effective_entry(ap2m, gfn, mfn, t, a)

Misra doesn't like such shadowing, so the inline function may want
naming slightly differently, e.g. _ap2m_get_effective_entry().

> @@ -914,8 +923,14 @@ static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
>  /* Switch alternate p2m for a single vcpu */
>  bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
>  
> +#ifdef CONFIG_ALTP2M
>  /* Check to see if vcpu should be switched to a different p2m. */
>  void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
> +#else
> +static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
> +{
> +}

Btw, no need to put the braces on separate lines for such a stub.

Jan
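Concretely, the one-line form of the stub Jan is asking for would be
something like:

static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}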
07.06.24 10:47, Jan Beulich:
> On 03.06.2024 13:13, Sergiy Kibrik wrote:
>> [...]
>> +#else
>> +static inline int altp2m_get_effective_entry(struct p2m_domain *ap2m,
>> +                                             gfn_t gfn, mfn_t *mfn,
>> +                                             p2m_type_t *t, p2m_access_t *a,
>> +                                             bool prepopulate)
>> +{
>> +    ASSERT_UNREACHABLE();
>> +    return -EOPNOTSUPP;
>> +}
>
> static inline int altp2m_get_effective_entry(struct p2m_domain *ap2m,
>                                              gfn_t gfn, mfn_t *mfn,
>                                              p2m_type_t *t, p2m_access_t *a)
> {
>     ASSERT_UNREACHABLE();
>     return -EOPNOTSUPP;
> }
> #define altp2m_get_effective_entry(ap2m, gfn, mfn, t, a, prepopulate) \
>     altp2m_get_effective_entry(ap2m, gfn, mfn, t, a)
>
> Misra doesn't like such shadowing, so the inline function may want
> naming slightly differently, e.g. _ap2m_get_effective_entry().

I can do that, sure.
Though here I'm curious what benefit we get from the slight complication
of an indirect call to an empty stub -- is avoiding the AP2MGET_* defines
worth it?

  -Sergiy
On 10.06.2024 12:48, Sergiy Kibrik wrote:
> 07.06.24 10:47, Jan Beulich:
>> [...]
>> Misra doesn't like such shadowing, so the inline function may want
>> naming slightly differently, e.g. _ap2m_get_effective_entry().
>
> I can do that, sure.
> Though here I'm curious what benefit we get from the slight complication
> of an indirect call to an empty stub -- is avoiding the AP2MGET_*
> defines worth it?

First - how is an indirect call coming into the picture here? We aim at
avoiding indirect calls where possible, after all. Yet iirc all calls to
altp2m_get_effective_entry() are direct ones.

As to avoiding the (or in fact any such) #define-s - imo it's better to
not have items in the name space which can't validly be used, unless
this (elsewhere) helps to keep the code tidy overall. This way problems
(e.g. from misunderstandings or flaws elsewhere) can be detected early.

Jan
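Taken together, Jan's suggested !CONFIG_ALTP2M fallback -- with the
rename he proposes to satisfy Misra -- would look roughly like this (a
sketch assembled from his comments above, not code from the patch
itself):

static inline int _ap2m_get_effective_entry(struct p2m_domain *ap2m,
                                            gfn_t gfn, mfn_t *mfn,
                                            p2m_type_t *t, p2m_access_t *a)
{
    /* Must never be reached when altp2m support is compiled out. */
    ASSERT_UNREACHABLE();
    return -EOPNOTSUPP;
}
/* Swallow the now-unused "prepopulate" argument at the call sites. */
#define altp2m_get_effective_entry(ap2m, gfn, mfn, t, a, prepopulate) \
    _ap2m_get_effective_entry(ap2m, gfn, mfn, t, a)

This keeps the six-argument call sites unchanged while avoiding the
function/macro name shadowing that Misra objects to.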
diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 8c9f8431f0..4a35c43dc5 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -358,6 +358,17 @@ config REQUIRE_NX
 	  was unavailable. However, if enabled, Xen will no longer boot on
 	  any CPU which is lacking NX support.
 
+config ALTP2M
+	bool "Alternate P2M support" if EXPERT
+	default y
+	depends on VMX
+	help
+	  Alternate-p2m allows a guest to manage multiple p2m guest physical
+	  "memory views" (as opposed to a single p2m).
+	  Useful for memory introspection.
+
+	  If unsure, stay with defaults.
+
 endmenu
 
 source "common/Kconfig"
diff --git a/xen/arch/x86/include/asm/altp2m.h b/xen/arch/x86/include/asm/altp2m.h
index 2d36c5aa9b..effbef51eb 100644
--- a/xen/arch/x86/include/asm/altp2m.h
+++ b/xen/arch/x86/include/asm/altp2m.h
@@ -7,7 +7,7 @@
 #ifndef __ASM_X86_ALTP2M_H
 #define __ASM_X86_ALTP2M_H
 
-#ifdef CONFIG_HVM
+#ifdef CONFIG_ALTP2M
 
 #include <xen/types.h>
 #include <xen/sched.h>         /* for struct vcpu, struct domain */
@@ -34,6 +34,9 @@ static inline bool altp2m_active(const struct domain *d)
 }
 
 /* Only declaration is needed. DCE will optimise it out when linking. */
+void altp2m_vcpu_initialise(struct vcpu *v);
+void altp2m_vcpu_destroy(struct vcpu *v);
+int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn);
 void altp2m_vcpu_disable_ve(struct vcpu *v);
 
 #endif
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 1c01e22c8e..2ebea1a92c 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -670,7 +670,7 @@ static inline bool hvm_hap_supported(void)
 /* returns true if hardware supports alternate p2m's */
 static inline bool hvm_altp2m_supported(void)
 {
-    return hvm_funcs.caps.altp2m;
+    return IS_ENABLED(CONFIG_ALTP2M) && hvm_funcs.caps.altp2m;
 }
 
 /* Returns true if we have the minimum hardware requirements for nested virt */
diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index c1478ffc36..b247aa4c7d 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -577,10 +577,10 @@ static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn)
         return _gfn(mfn_x(mfn));
 }
 
-#ifdef CONFIG_HVM
 #define AP2MGET_prepopulate true
 #define AP2MGET_query false
 
+#ifdef CONFIG_ALTP2M
 /*
  * Looks up altp2m entry. If the entry is not found it looks up the entry in
  * hostp2m.
@@ -589,6 +589,15 @@ static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn)
 int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
                                p2m_type_t *t, p2m_access_t *a,
                                bool prepopulate);
+#else
+static inline int altp2m_get_effective_entry(struct p2m_domain *ap2m,
+                                             gfn_t gfn, mfn_t *mfn,
+                                             p2m_type_t *t, p2m_access_t *a,
+                                             bool prepopulate)
+{
+    ASSERT_UNREACHABLE();
+    return -EOPNOTSUPP;
+}
 #endif
 
 /* Init the datastructures for later use by the p2m code */
@@ -914,8 +923,14 @@ static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
 /* Switch alternate p2m for a single vcpu */
 bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
 
+#ifdef CONFIG_ALTP2M
 /* Check to see if vcpu should be switched to a different p2m. */
 void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
+#else
+static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+}
+#endif
 
 /* Flush all the alternate p2m's for a domain */
 void p2m_flush_altp2m(struct domain *d);
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 0128ca7ab6..d7d57b8190 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -1,7 +1,7 @@
 obj-y += shadow/
 obj-$(CONFIG_HVM) += hap/
 
-obj-$(CONFIG_HVM) += altp2m.o
+obj-$(CONFIG_ALTP2M) += altp2m.o
 obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_SHADOW_PAGING) += guest_walk_4.o
 obj-$(CONFIG_MEM_ACCESS) += mem_access.o
Add a new option to make altp2m code inclusion optional.
Currently altp2m is implemented for Intel EPT only, so the option
depends on VMX. The prompt itself also depends on EXPERT=y, so that the
option remains available for fine-tuning, should one want to play
around with it.

Use this option instead of the more generic CONFIG_HVM option. That
implies the possibility of building HVM code without altp2m support,
hence we need to declare the altp2m routines for HVM code to compile
successfully (altp2m_vcpu_initialise(), altp2m_vcpu_destroy(),
altp2m_vcpu_enable_ve()).

Also guard the altp2m routines, so that they can be disabled completely
in the build -- when the target platform does not actually support
altp2m (AMD-V & ARM as of now).

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
CC: Tamas K Lengyel <tamas@tklengyel.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
---
changes in v3:
 - added help text
 - use conditional prompt depending on EXPERT=y
 - corrected & extended patch description
 - put a blank line before #ifdef CONFIG_ALTP2M
 - squashed in a separate patch for guarding altp2m code with the
   CONFIG_ALTP2M option
changes in v2:
 - use separate CONFIG_ALTP2M option instead of CONFIG_VMX
---
 xen/arch/x86/Kconfig               | 11 +++++++++++
 xen/arch/x86/include/asm/altp2m.h  |  5 ++++-
 xen/arch/x86/include/asm/hvm/hvm.h |  2 +-
 xen/arch/x86/include/asm/p2m.h     | 17 ++++++++++++++++-
 xen/arch/x86/mm/Makefile           |  2 +-
 5 files changed, 33 insertions(+), 4 deletions(-)
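As background for the "Only declaration is needed. DCE will optimise it
out when linking." comment in the altp2m.h hunk: with CONFIG_ALTP2M
disabled, hvm_altp2m_supported() folds to a compile-time false, so any
call guarded by it is statically dead and the declared-but-undefined
altp2m_vcpu_*() routines are never actually referenced. A minimal
sketch of the pattern (the vcpu_teardown() caller here is hypothetical,
not code from the patch):

/* Declaration only -- no definition is built when CONFIG_ALTP2M=n. */
void altp2m_vcpu_destroy(struct vcpu *v);

static void vcpu_teardown(struct vcpu *v)    /* hypothetical caller */
{
    /*
     * IS_ENABLED(CONFIG_ALTP2M) evaluates to a constant 0 inside
     * hvm_altp2m_supported(), so the compiler eliminates this branch
     * and no undefined reference to altp2m_vcpu_destroy() reaches
     * the linker.
     */
    if ( hvm_altp2m_supported() )
        altp2m_vcpu_destroy(v);
}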