Message ID | e6e0f30ea69ecfed4d8de531fc42162162b3f6e5.1720501197.git.Sergiy_Kibrik@epam.com (mailing list archive) |
---|---|
State | Superseded |
Series | x86: make CPU virtualisation support configurable |
On 09.07.2024 08:09, Sergiy Kibrik wrote:
> --- a/xen/arch/x86/include/asm/hvm/ioreq.h
> +++ b/xen/arch/x86/include/asm/hvm/ioreq.h
> @@ -13,6 +13,11 @@
>  #define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
>  #define IOREQ_STATUS_RETRY     X86EMUL_RETRY
>
> +#ifdef CONFIG_VMX
> +bool arch_vcpu_ioreq_completion(enum vio_completion completion);
> +#define arch_vcpu_ioreq_completion
> +#endif

Putting the (or some kind of) #define here is certainly fine, but moving ...

> --- a/xen/include/xen/ioreq.h
> +++ b/xen/include/xen/ioreq.h
> @@ -111,7 +111,6 @@ void ioreq_domain_init(struct domain *d);
>  int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
>
>  bool arch_ioreq_complete_mmio(void);
> -bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>  int arch_ioreq_server_map_pages(struct ioreq_server *s);
>  void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
>  void arch_ioreq_server_enable(struct ioreq_server *s);

... the declaration from here requires that all architectures wanting to
implement the function need to have identical copies. That's unnecessary
risk of going out of sync.

As to the #define itself: It expanding to nothing means the call site
de-generates to

#ifdef arch_vcpu_ioreq_completion
        res = (completion);
#else

which hardly is what is meant (despite compiling fine, and it likely
only being Eclair which would then tell us about the issue). Further
there you're also removing a blank line, I don't see why you're doing
that.

Jan
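The degeneration Jan describes can be avoided with a self-referencing
define: the preprocessor does not re-expand a macro name inside its own
expansion, so the #ifdef test succeeds while the call survives intact. A
minimal sketch of that idiom (illustration only, not what this patch does):

/* arch header: declare the hook and make its name testable via #ifdef */
bool arch_vcpu_ioreq_completion(enum vio_completion completion);
#define arch_vcpu_ioreq_completion arch_vcpu_ioreq_completion

/* common call site: the macro expands to itself, leaving the call as is */
#ifdef arch_vcpu_ioreq_completion
        res = arch_vcpu_ioreq_completion(completion);
#else
        ASSERT_UNREACHABLE();
#endif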
09.07.24 10:28, Jan Beulich:
> On 09.07.2024 08:09, Sergiy Kibrik wrote:
>> --- a/xen/arch/x86/include/asm/hvm/ioreq.h
>> +++ b/xen/arch/x86/include/asm/hvm/ioreq.h
>> @@ -13,6 +13,11 @@
>>  #define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
>>  #define IOREQ_STATUS_RETRY     X86EMUL_RETRY
>>
>> +#ifdef CONFIG_VMX
>> +bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>> +#define arch_vcpu_ioreq_completion
>> +#endif
>
> Putting the (or some kind of) #define here is certainly fine, but moving ...
>
>> --- a/xen/include/xen/ioreq.h
>> +++ b/xen/include/xen/ioreq.h
>> @@ -111,7 +111,6 @@ void ioreq_domain_init(struct domain *d);
>>  int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
>>
>>  bool arch_ioreq_complete_mmio(void);
>> -bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>>  int arch_ioreq_server_map_pages(struct ioreq_server *s);
>>  void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
>>  void arch_ioreq_server_enable(struct ioreq_server *s);
>
> ... the declaration from here requires that all architectures wanting to
> implement the function need to have identical copies. That's unnecessary
> risk of going out of sync.
>
> As to the #define itself: It expanding to nothing means the call site
> de-generates to
>
> #ifdef arch_vcpu_ioreq_completion
>         res = (completion);
> #else
>
> which hardly is what is meant (despite compiling fine, and it likely
> only being Eclair which would then tell us about the issue). Further
> there you're also removing a blank line, I don't see why you're doing
> that.
>

looking through these changes once again I wonder why we can't just move
the stub to the header, like this:

in xen/include/xen/ioreq.h:

#ifdef arch_vcpu_ioreq_completion

#ifdef CONFIG_VMX
bool arch_vcpu_ioreq_completion(enum vio_completion completion);
#else
static inline bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
    ASSERT_UNREACHABLE();
    return true;
}
#endif

and avoid additional pre-processor variables & conditionals, because it
looks like we do need some kind of stub that does ASSERT_UNREACHABLE()
anyway.

-Sergiy
On 10.07.2024 12:10, Sergiy Kibrik wrote:
> 09.07.24 10:28, Jan Beulich:
>> On 09.07.2024 08:09, Sergiy Kibrik wrote:
>>> --- a/xen/arch/x86/include/asm/hvm/ioreq.h
>>> +++ b/xen/arch/x86/include/asm/hvm/ioreq.h
>>> @@ -13,6 +13,11 @@
>>>  #define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
>>>  #define IOREQ_STATUS_RETRY     X86EMUL_RETRY
>>>
>>> +#ifdef CONFIG_VMX
>>> +bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>>> +#define arch_vcpu_ioreq_completion
>>> +#endif
>>
>> Putting the (or some kind of) #define here is certainly fine, but moving ...
>>
>>> --- a/xen/include/xen/ioreq.h
>>> +++ b/xen/include/xen/ioreq.h
>>> @@ -111,7 +111,6 @@ void ioreq_domain_init(struct domain *d);
>>>  int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
>>>
>>>  bool arch_ioreq_complete_mmio(void);
>>> -bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>>>  int arch_ioreq_server_map_pages(struct ioreq_server *s);
>>>  void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
>>>  void arch_ioreq_server_enable(struct ioreq_server *s);
>>
>> ... the declaration from here requires that all architectures wanting to
>> implement the function need to have identical copies. That's unnecessary
>> risk of going out of sync.
>>
>> As to the #define itself: It expanding to nothing means the call site
>> de-generates to
>>
>> #ifdef arch_vcpu_ioreq_completion
>>         res = (completion);
>> #else
>>
>> which hardly is what is meant (despite compiling fine, and it likely
>> only being Eclair which would then tell us about the issue). Further
>> there you're also removing a blank line, I don't see why you're doing
>> that.
>>
>
> looking through these changes once again I wonder why we can't just move
> the stub to the header, like this:
>
> in xen/include/xen/ioreq.h:
>
> #ifdef arch_vcpu_ioreq_completion
>
> #ifdef CONFIG_VMX
> bool arch_vcpu_ioreq_completion(enum vio_completion completion);
> #else
> static inline bool arch_vcpu_ioreq_completion(enum vio_completion completion)
> {
>     ASSERT_UNREACHABLE();
>     return true;
> }
> #endif
>
> and avoid additional pre-processor variables & conditionals, because it
> looks like we do need some kind of stub that does ASSERT_UNREACHABLE()
> anyway.

That's possible to do, yes, but not as long as you key it off of CONFIG_VMX.
This arch-specific setting would better not be used in a common code header.
You could introduce a helper CONFIG_* which VMX selects, at which point
doing what you suggest is an option.

However, in what you have above I can't figure why "#ifdef
arch_vcpu_ioreq_completion" is still there.

Jan
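A sketch of the helper option Jan describes, using a hypothetical symbol
name CONFIG_ARCH_VCPU_IOREQ_COMPLETION (the thread does not settle on a
name; "..." stands for the existing VMX Kconfig entry):

config ARCH_VCPU_IOREQ_COMPLETION
        bool

config VMX
        ...
        select ARCH_VCPU_IOREQ_COMPLETION

The common header could then carry both the declaration and the stub
without referencing the arch-specific CONFIG_VMX:

/* xen/include/xen/ioreq.h */
#ifdef CONFIG_ARCH_VCPU_IOREQ_COMPLETION
bool arch_vcpu_ioreq_completion(enum vio_completion completion);
#else
static inline bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
    /* No arch provides an implementation; reaching here is a bug. */
    ASSERT_UNREACHABLE();
    return true;
}
#endif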
10.07.24 13:19, Jan Beulich:
>> looking through these changes once again I wonder why we can't just move
>> the stub to the header, like this:
>>
>> in xen/include/xen/ioreq.h:
>>
>> #ifdef arch_vcpu_ioreq_completion
>>
>> #ifdef CONFIG_VMX
>> bool arch_vcpu_ioreq_completion(enum vio_completion completion);
>> #else
>> static inline bool arch_vcpu_ioreq_completion(enum vio_completion completion)
>> {
>>     ASSERT_UNREACHABLE();
>>     return true;
>> }
>> #endif
>>
>> and avoid additional pre-processor variables & conditionals, because it
>> looks like we do need some kind of stub that does ASSERT_UNREACHABLE()
>> anyway.
>
> That's possible to do, yes, but not as long as you key it off of CONFIG_VMX.
> This arch-specific setting would better not be used in a common code header.
> You could introduce a helper CONFIG_* which VMX selects, at which point
> doing what you suggest is an option.
>

ok, I'll try this option in the next series, let's see how it will look

> However, in what you have above I can't figure why "#ifdef
> arch_vcpu_ioreq_completion" is still there.
>

disregard it please, a copy-paste error

-Sergiy
diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 5df755b48b..2e829d2e7f 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
     return false;
 }
 
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
-    ASSERT_UNREACHABLE();
-    return true;
-}
-
 /*
  * The "legacy" mechanism of mapping magic pages for the IOREQ servers
  * is x86 specific, so the following hooks don't need to be implemented on Arm:
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 4eb7a70182..0406630dc8 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -29,6 +29,7 @@ bool arch_ioreq_complete_mmio(void)
     return handle_mmio();
 }
 
+#ifdef CONFIG_VMX
 bool arch_vcpu_ioreq_completion(enum vio_completion completion)
 {
     switch ( completion )
@@ -51,6 +52,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion)
 
     return true;
 }
+#endif
 
 static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
 {
diff --git a/xen/arch/x86/include/asm/hvm/ioreq.h b/xen/arch/x86/include/asm/hvm/ioreq.h
index 84be14fd08..c5f16a1e4a 100644
--- a/xen/arch/x86/include/asm/hvm/ioreq.h
+++ b/xen/arch/x86/include/asm/hvm/ioreq.h
@@ -13,6 +13,11 @@
 #define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE
 #define IOREQ_STATUS_RETRY     X86EMUL_RETRY
 
+#ifdef CONFIG_VMX
+bool arch_vcpu_ioreq_completion(enum vio_completion completion);
+#define arch_vcpu_ioreq_completion
+#endif
+
 #endif /* __ASM_X86_HVM_IOREQ_H__ */
 
 /*
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 1257a3d972..10fe932a7e 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -242,9 +242,12 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
         res = handle_pio(vio->req.addr, vio->req.size,
                          vio->req.dir);
         break;
-
     default:
+#ifdef arch_vcpu_ioreq_completion
         res = arch_vcpu_ioreq_completion(completion);
+#else
+        ASSERT_UNREACHABLE();
+#endif
         break;
     }
 
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index cd399adf17..22fb9ba7b0 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -111,7 +111,6 @@ void ioreq_domain_init(struct domain *d);
 int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
 
 bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum vio_completion completion);
 int arch_ioreq_server_map_pages(struct ioreq_server *s);
 void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
 void arch_ioreq_server_enable(struct ioreq_server *s);