Message ID | 20171108090739.26491-3-jgross@suse.com (mailing list archive)
---|---
State | New, archived
* Juergen Gross <jgross@suse.com> wrote:

> Add a new guest_late_init hook to the hypervisor_x86 structure. It
> will replace the current kvm_guest_init() call which is changed to
> make use of the new hook.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  arch/x86/include/asm/hypervisor.h | 11 +++++++++++
>  arch/x86/include/asm/kvm_para.h   |  2 --
>  arch/x86/kernel/kvm.c             |  3 ++-
>  arch/x86/kernel/setup.c           |  2 +-
>  4 files changed, 14 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
> index 0ead9dbb9130..37320687b8cb 100644
> --- a/arch/x86/include/asm/hypervisor.h
> +++ b/arch/x86/include/asm/hypervisor.h
> @@ -38,6 +38,9 @@ struct hypervisor_x86 {
>          /* Platform setup (run once per boot) */
>          void (*init_platform)(void);
> 
> +        /* Guest late init */
> +        void (*guest_late_init)(void);
> +
>          /* X2APIC detection (run once per boot) */
>          bool (*x2apic_available)(void);
> 
> @@ -66,9 +69,17 @@ static inline void hypervisor_init_mem_mapping(void)
>          if (x86_hyper && x86_hyper->init_mem_mapping)
>                  x86_hyper->init_mem_mapping();
>  }
> +
> +static inline void hypervisor_guest_late_init(void)
> +{
> +        if (x86_hyper && x86_hyper->guest_late_init)
> +                x86_hyper->guest_late_init();
> +}
> +
>  #else
>  static inline void init_hypervisor_platform(void) { }
>  static inline bool hypervisor_x2apic_available(void) { return false; }
>  static inline void hypervisor_init_mem_mapping(void) { }
> +static inline void hypervisor_guest_late_init(void) { }
>  #endif /* CONFIG_HYPERVISOR_GUEST */
>  #endif /* _ASM_X86_HYPERVISOR_H */
> diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
> index c373e44049b1..7b407dda2bd7 100644
> --- a/arch/x86/include/asm/kvm_para.h
> +++ b/arch/x86/include/asm/kvm_para.h
> @@ -88,7 +88,6 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
>  #ifdef CONFIG_KVM_GUEST
>  bool kvm_para_available(void);
>  unsigned int kvm_arch_para_features(void);
> -void __init kvm_guest_init(void);
>  void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
>  void kvm_async_pf_task_wake(u32 token);
>  u32 kvm_read_and_reset_pf_reason(void);
> @@ -103,7 +102,6 @@ static inline void kvm_spinlock_init(void)
>  #endif /* CONFIG_PARAVIRT_SPINLOCKS */
> 
>  #else /* CONFIG_KVM_GUEST */
> -#define kvm_guest_init() do {} while (0)
>  #define kvm_async_pf_task_wait(T, I) do {} while(0)
>  #define kvm_async_pf_task_wake(T) do {} while(0)
> 
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 8bb9594d0761..d331b5060aa9 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -465,7 +465,7 @@ static void __init kvm_apf_trap_init(void)
>          update_intr_gate(X86_TRAP_PF, async_page_fault);
>  }
> 
> -void __init kvm_guest_init(void)
> +static void __init kvm_guest_init(void)
>  {
>          int i;
> 
> @@ -548,6 +548,7 @@ const struct hypervisor_x86 x86_hyper_kvm __refconst = {
>          .name = "KVM",
>          .detect = kvm_detect,
>          .x2apic_available = kvm_para_available,
> +        .guest_late_init = kvm_guest_init,
>  };
>  EXPORT_SYMBOL_GPL(x86_hyper_kvm);
> 
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index 0957dd73d127..578569481d87 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1294,7 +1294,7 @@ void __init setup_arch(char **cmdline_p)
> 
>          io_apic_init_mappings();
> 
> -        kvm_guest_init();
> +        hypervisor_guest_late_init();

No principal objections, but could we please use a more obvious pattern showing
that this is a callback, by calling it directly:

        x86_hyper->guest_late_init();

Plus add a default empty function (which hypervisors can override). This avoids
all the hidden conditions and wrappery.

Thanks,

        Ingo
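[Editor's illustration: a minimal C sketch of the direct-call pattern suggested above — a default empty hook so the callback can be invoked unconditionally, with no wrapper and no NULL check. All identifiers here (guest_late_init_nop, x86_hyper_example, guest_late_init_call_site) are invented for the sketch and are not part of the patch.]

```c
#include <linux/init.h>
#include <asm/hypervisor.h>

/* Default hook for hypervisors that have nothing to do at this stage. */
static void guest_late_init_nop(void) { }

/* Hypothetical hypervisor ops table falling back to the default. */
static const struct hypervisor_x86 x86_hyper_example = {
	.name            = "example",
	.guest_late_init = guest_late_init_nop,
};

/* The call site in setup_arch() would then simply read: */
static void __init guest_late_init_call_site(void)
{
	x86_hyper->guest_late_init();
}
```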
On 08/11/17 10:13, Ingo Molnar wrote:
> 
> * Juergen Gross <jgross@suse.com> wrote:
> 
>> Add a new guest_late_init hook to the hypervisor_x86 structure. It
>> will replace the current kvm_guest_init() call which is changed to
>> make use of the new hook.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>>  arch/x86/include/asm/hypervisor.h | 11 +++++++++++
>>  arch/x86/include/asm/kvm_para.h   |  2 --
>>  arch/x86/kernel/kvm.c             |  3 ++-
>>  arch/x86/kernel/setup.c           |  2 +-
>>  4 files changed, 14 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
>> index 0ead9dbb9130..37320687b8cb 100644
>> --- a/arch/x86/include/asm/hypervisor.h
>> +++ b/arch/x86/include/asm/hypervisor.h
>> @@ -38,6 +38,9 @@ struct hypervisor_x86 {
>>          /* Platform setup (run once per boot) */
>>          void (*init_platform)(void);
>>
>> +        /* Guest late init */
>> +        void (*guest_late_init)(void);
>> +
>>          /* X2APIC detection (run once per boot) */
>>          bool (*x2apic_available)(void);
>>
>> @@ -66,9 +69,17 @@ static inline void hypervisor_init_mem_mapping(void)
>>          if (x86_hyper && x86_hyper->init_mem_mapping)
>>                  x86_hyper->init_mem_mapping();
>>  }
>> +
>> +static inline void hypervisor_guest_late_init(void)
>> +{
>> +        if (x86_hyper && x86_hyper->guest_late_init)
>> +                x86_hyper->guest_late_init();
>> +}
>> +
>>  #else
>>  static inline void init_hypervisor_platform(void) { }
>>  static inline bool hypervisor_x2apic_available(void) { return false; }
>>  static inline void hypervisor_init_mem_mapping(void) { }
>> +static inline void hypervisor_guest_late_init(void) { }
>>  #endif /* CONFIG_HYPERVISOR_GUEST */
>>  #endif /* _ASM_X86_HYPERVISOR_H */
>> diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
>> index c373e44049b1..7b407dda2bd7 100644
>> --- a/arch/x86/include/asm/kvm_para.h
>> +++ b/arch/x86/include/asm/kvm_para.h
>> @@ -88,7 +88,6 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
>>  #ifdef CONFIG_KVM_GUEST
>>  bool kvm_para_available(void);
>>  unsigned int kvm_arch_para_features(void);
>> -void __init kvm_guest_init(void);
>>  void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
>>  void kvm_async_pf_task_wake(u32 token);
>>  u32 kvm_read_and_reset_pf_reason(void);
>> @@ -103,7 +102,6 @@ static inline void kvm_spinlock_init(void)
>>  #endif /* CONFIG_PARAVIRT_SPINLOCKS */
>>
>>  #else /* CONFIG_KVM_GUEST */
>> -#define kvm_guest_init() do {} while (0)
>>  #define kvm_async_pf_task_wait(T, I) do {} while(0)
>>  #define kvm_async_pf_task_wake(T) do {} while(0)
>>
>> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
>> index 8bb9594d0761..d331b5060aa9 100644
>> --- a/arch/x86/kernel/kvm.c
>> +++ b/arch/x86/kernel/kvm.c
>> @@ -465,7 +465,7 @@ static void __init kvm_apf_trap_init(void)
>>          update_intr_gate(X86_TRAP_PF, async_page_fault);
>>  }
>>
>> -void __init kvm_guest_init(void)
>> +static void __init kvm_guest_init(void)
>>  {
>>          int i;
>>
>> @@ -548,6 +548,7 @@ const struct hypervisor_x86 x86_hyper_kvm __refconst = {
>>          .name = "KVM",
>>          .detect = kvm_detect,
>>          .x2apic_available = kvm_para_available,
>> +        .guest_late_init = kvm_guest_init,
>>  };
>>  EXPORT_SYMBOL_GPL(x86_hyper_kvm);
>>
>> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
>> index 0957dd73d127..578569481d87 100644
>> --- a/arch/x86/kernel/setup.c
>> +++ b/arch/x86/kernel/setup.c
>> @@ -1294,7 +1294,7 @@ void __init setup_arch(char **cmdline_p)
>>
>>          io_apic_init_mappings();
>>
>> -        kvm_guest_init();
>> +        hypervisor_guest_late_init();
> 
> No principal objections, but could we please use a more obvious pattern showing
> that this is a callback, by calling it directly:
> 
>         x86_hyper->guest_late_init();
> 
> Plus add a default empty function (which hypervisors can override). This avoids
> all the hidden conditions and wrappery.

Hmm, x86_hyper is just a pointer being NULL on bare metal. So we would
have to add a pre-filled struct with dummy functions and in case a
hypervisor is detected we'd need to copy all non-NULL pointers of the
hypervisor specific struct to the pre-filled one.

In case there are no objections I can add a patch to modify the current
way x86_hyper is used to the pre-filled variant.


Juergen
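[Editor's illustration: a rough C sketch of the pre-filled variant Juergen describes — a default table of no-op hooks that a detected hypervisor overrides by merging in its non-NULL pointers. The names (hyper_nop, x86_hyper_filled, copy_nonzero_ptrs) are invented for the sketch, and it assumes the copied region consists of pointer-sized members only.]

```c
#include <linux/init.h>
#include <asm/hypervisor.h>

static void hyper_nop(void) { }

/* Instance used unmodified on bare metal: every hook is a no-op. */
struct hypervisor_x86 x86_hyper_filled = {
	.name            = "bare metal",
	.init_platform   = hyper_nop,
	.guest_late_init = hyper_nop,
	/* remaining hooks likewise pre-filled with no-ops */
};

/* Merge only the callbacks the detected hypervisor actually provides. */
static void __init copy_nonzero_ptrs(void *dst, const void *src, unsigned int size)
{
	unsigned long *d = dst;
	const unsigned long *s = src;
	unsigned int i;

	for (i = 0; i < size / sizeof(*d); i++)
		if (s[i])
			d[i] = s[i];
}
```

[Detection code could then do something like `copy_nonzero_ptrs(&x86_hyper_filled, h, sizeof(*h));` for a detected table `h` — again, purely a sketch of the idea, not the final form of the series.]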
* Juergen Gross <jgross@suse.com> wrote:

> > Plus add a default empty function (which hypervisors can override). This avoids
> > all the hidden conditions and wrappery.
> 
> Hmm, x86_hyper is just a pointer being NULL on bare metal. So we would
> have to add a pre-filled struct with dummy functions and in case a
> hypervisor is detected we'd need to copy all non-NULL pointers of the
> hypervisor specific struct to the pre-filled one.

Ok, I missed that detail - but yeah, since this is mostly about boot code,
where readability is king, I still think it would be an overall improvement.

This is the pattern we are trying to use with x86_platform ops for example, and
doing:

        git grep -w x86_platform arch/x86

gives a pretty clear idea about how it's used - while if it was all within
wrappers it would be a lot more difficult to discover all the callsites.

Admittedly it's not done totally consistently:

  arch/x86/kernel/apic/probe_32.c:  if (x86_platform.apic_post_init)
  arch/x86/kernel/apic/probe_64.c:  if (x86_platform.apic_post_init)
  arch/x86/kernel/ebda.c:           if (!x86_platform.legacy.reserve_bios_regions)
  arch/x86/kernel/platform-quirks.c: if (x86_platform.set_legacy_features)
  arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c: if (!x86_platform.legacy.rtc)

... but the _idea_ behind it is consistent ;-)

> In case there are no objections I can add a patch to modify the current
> way x86_hyper is used to the pre-filled variant.

Yeah, sounds good to me!

Thanks,

        Ingo
On 08/11/17 10:40, Ingo Molnar wrote:
> 
> * Juergen Gross <jgross@suse.com> wrote:
> 
>>> Plus add a default empty function (which hypervisors can override). This avoids
>>> all the hidden conditions and wrappery.
>>
>> Hmm, x86_hyper is just a pointer being NULL on bare metal. So we would
>> have to add a pre-filled struct with dummy functions and in case a
>> hypervisor is detected we'd need to copy all non-NULL pointers of the
>> hypervisor specific struct to the pre-filled one.
> 
> Ok, I missed that detail - but yeah, since this is mostly about boot code,
> where readability is king, I still think it would be an overall improvement.
> 
> This is the pattern we are trying to use with x86_platform ops for example, and
> doing:
> 
>         git grep -w x86_platform arch/x86
> 
> gives a pretty clear idea about how it's used - while if it was all within
> wrappers it would be a lot more difficult to discover all the callsites.
> 
> Admittedly it's not done totally consistently:
> 
>   arch/x86/kernel/apic/probe_32.c:  if (x86_platform.apic_post_init)
>   arch/x86/kernel/apic/probe_64.c:  if (x86_platform.apic_post_init)
>   arch/x86/kernel/ebda.c:           if (!x86_platform.legacy.reserve_bios_regions)
>   arch/x86/kernel/platform-quirks.c: if (x86_platform.set_legacy_features)
>   arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c: if (!x86_platform.legacy.rtc)
> 
> ... but the _idea_ behind it is consistent ;-)
> 
>> In case there are no objections I can add a patch to modify the current
>> way x86_hyper is used to the pre-filled variant.
> 
> Yeah, sounds good to me!

Okay. With you mentioning x86_platform: should I make x86_hyper a member
of x86_platform (e.g. x86_platform.hyper.)?

I think this would fit quite nicely.


Juergen
On 08/11/2017 10:07, Juergen Gross wrote:
> Add a new guest_late_init hook to the hypervisor_x86 structure. It
> will replace the current kvm_guest_init() call which is changed to
> make use of the new hook.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

The trivial KVM changes are of course

Acked-by: Paolo Bonzini <pbonzini@redhat.com>

Paolo

> ---
>  arch/x86/include/asm/hypervisor.h | 11 +++++++++++
>  arch/x86/include/asm/kvm_para.h   |  2 --
>  arch/x86/kernel/kvm.c             |  3 ++-
>  arch/x86/kernel/setup.c           |  2 +-
>  4 files changed, 14 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
> index 0ead9dbb9130..37320687b8cb 100644
> --- a/arch/x86/include/asm/hypervisor.h
> +++ b/arch/x86/include/asm/hypervisor.h
> @@ -38,6 +38,9 @@ struct hypervisor_x86 {
>          /* Platform setup (run once per boot) */
>          void (*init_platform)(void);
> 
> +        /* Guest late init */
> +        void (*guest_late_init)(void);
> +
>          /* X2APIC detection (run once per boot) */
>          bool (*x2apic_available)(void);
> 
> @@ -66,9 +69,17 @@ static inline void hypervisor_init_mem_mapping(void)
>          if (x86_hyper && x86_hyper->init_mem_mapping)
>                  x86_hyper->init_mem_mapping();
>  }
> +
> +static inline void hypervisor_guest_late_init(void)
> +{
> +        if (x86_hyper && x86_hyper->guest_late_init)
> +                x86_hyper->guest_late_init();
> +}
> +
>  #else
>  static inline void init_hypervisor_platform(void) { }
>  static inline bool hypervisor_x2apic_available(void) { return false; }
>  static inline void hypervisor_init_mem_mapping(void) { }
> +static inline void hypervisor_guest_late_init(void) { }
>  #endif /* CONFIG_HYPERVISOR_GUEST */
>  #endif /* _ASM_X86_HYPERVISOR_H */
> diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
> index c373e44049b1..7b407dda2bd7 100644
> --- a/arch/x86/include/asm/kvm_para.h
> +++ b/arch/x86/include/asm/kvm_para.h
> @@ -88,7 +88,6 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
>  #ifdef CONFIG_KVM_GUEST
>  bool kvm_para_available(void);
>  unsigned int kvm_arch_para_features(void);
> -void __init kvm_guest_init(void);
>  void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
>  void kvm_async_pf_task_wake(u32 token);
>  u32 kvm_read_and_reset_pf_reason(void);
> @@ -103,7 +102,6 @@ static inline void kvm_spinlock_init(void)
>  #endif /* CONFIG_PARAVIRT_SPINLOCKS */
> 
>  #else /* CONFIG_KVM_GUEST */
> -#define kvm_guest_init() do {} while (0)
>  #define kvm_async_pf_task_wait(T, I) do {} while(0)
>  #define kvm_async_pf_task_wake(T) do {} while(0)
> 
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 8bb9594d0761..d331b5060aa9 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -465,7 +465,7 @@ static void __init kvm_apf_trap_init(void)
>          update_intr_gate(X86_TRAP_PF, async_page_fault);
>  }
> 
> -void __init kvm_guest_init(void)
> +static void __init kvm_guest_init(void)
>  {
>          int i;
> 
> @@ -548,6 +548,7 @@ const struct hypervisor_x86 x86_hyper_kvm __refconst = {
>          .name = "KVM",
>          .detect = kvm_detect,
>          .x2apic_available = kvm_para_available,
> +        .guest_late_init = kvm_guest_init,
>  };
>  EXPORT_SYMBOL_GPL(x86_hyper_kvm);
> 
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index 0957dd73d127..578569481d87 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1294,7 +1294,7 @@ void __init setup_arch(char **cmdline_p)
> 
>          io_apic_init_mappings();
> 
> -        kvm_guest_init();
> +        hypervisor_guest_late_init();
> 
>          e820__reserve_resources();
>          e820__register_nosave_regions(max_low_pfn);
> 
* Juergen Gross <jgross@suse.com> wrote:

> On 08/11/17 10:40, Ingo Molnar wrote:
> > 
> > * Juergen Gross <jgross@suse.com> wrote:
> > 
> >>> Plus add a default empty function (which hypervisors can override). This avoids
> >>> all the hidden conditions and wrappery.
> >>
> >> Hmm, x86_hyper is just a pointer being NULL on bare metal. So we would
> >> have to add a pre-filled struct with dummy functions and in case a
> >> hypervisor is detected we'd need to copy all non-NULL pointers of the
> >> hypervisor specific struct to the pre-filled one.
> > 
> > Ok, I missed that detail - but yeah, since this is mostly about boot code,
> > where readability is king, I still think it would be an overall improvement.
> > 
> > This is the pattern we are trying to use with x86_platform ops for example, and
> > doing:
> > 
> >         git grep -w x86_platform arch/x86
> > 
> > gives a pretty clear idea about how it's used - while if it was all within
> > wrappers it would be a lot more difficult to discover all the callsites.
> > 
> > Admittedly it's not done totally consistently:
> > 
> >   arch/x86/kernel/apic/probe_32.c:  if (x86_platform.apic_post_init)
> >   arch/x86/kernel/apic/probe_64.c:  if (x86_platform.apic_post_init)
> >   arch/x86/kernel/ebda.c:           if (!x86_platform.legacy.reserve_bios_regions)
> >   arch/x86/kernel/platform-quirks.c: if (x86_platform.set_legacy_features)
> >   arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c: if (!x86_platform.legacy.rtc)
> > 
> > ... but the _idea_ behind it is consistent ;-)
> > 
> >> In case there are no objections I can add a patch to modify the current
> >> way x86_hyper is used to the pre-filled variant.
> > 
> > Yeah, sounds good to me!
> 
> Okay. With you mentioning x86_platform: should I make x86_hyper a member
> of x86_platform (e.g. x86_platform.hyper.)?
> 
> I think this would fit quite nicely.

Yeah, seems like a good idea!

Thanks,

        Ingo
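[Editor's illustration: a speculative C sketch of the x86_platform.hyper idea agreed on above. The sub-structure name and its members are invented for the sketch and need not match what was eventually merged.]

```c
#include <linux/init.h>

/* Hypervisor hooks grouped into their own ops structure (sketch name). */
struct x86_hyper_ops {
	void (*guest_late_init)(void);
	void (*init_mem_mapping)(void);
};

/* ... embedded into the platform ops so they appear as x86_platform.hyper: */
struct x86_platform_ops {
	/* existing members such as calibrate_tsc, get_wallclock, ... */
	struct x86_hyper_ops hyper;
};

extern struct x86_platform_ops x86_platform;

/* A call site then reads like the other x86_platform users: */
static void __init guest_late_init_call_site(void)
{
	x86_platform.hyper.guest_late_init();
}
```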
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 0ead9dbb9130..37320687b8cb 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -38,6 +38,9 @@ struct hypervisor_x86 {
         /* Platform setup (run once per boot) */
         void (*init_platform)(void);
 
+        /* Guest late init */
+        void (*guest_late_init)(void);
+
         /* X2APIC detection (run once per boot) */
         bool (*x2apic_available)(void);
 
@@ -66,9 +69,17 @@ static inline void hypervisor_init_mem_mapping(void)
         if (x86_hyper && x86_hyper->init_mem_mapping)
                 x86_hyper->init_mem_mapping();
 }
+
+static inline void hypervisor_guest_late_init(void)
+{
+        if (x86_hyper && x86_hyper->guest_late_init)
+                x86_hyper->guest_late_init();
+}
+
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
 static inline void hypervisor_init_mem_mapping(void) { }
+static inline void hypervisor_guest_late_init(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c373e44049b1..7b407dda2bd7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -88,7 +88,6 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 #ifdef CONFIG_KVM_GUEST
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
-void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
@@ -103,7 +102,6 @@ static inline void kvm_spinlock_init(void)
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 #else /* CONFIG_KVM_GUEST */
-#define kvm_guest_init() do {} while (0)
 #define kvm_async_pf_task_wait(T, I) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8bb9594d0761..d331b5060aa9 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -465,7 +465,7 @@ static void __init kvm_apf_trap_init(void)
         update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
-void __init kvm_guest_init(void)
+static void __init kvm_guest_init(void)
 {
         int i;
 
@@ -548,6 +548,7 @@ const struct hypervisor_x86 x86_hyper_kvm __refconst = {
         .name = "KVM",
         .detect = kvm_detect,
         .x2apic_available = kvm_para_available,
+        .guest_late_init = kvm_guest_init,
 };
 EXPORT_SYMBOL_GPL(x86_hyper_kvm);
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0957dd73d127..578569481d87 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1294,7 +1294,7 @@ void __init setup_arch(char **cmdline_p)
 
         io_apic_init_mappings();
 
-        kvm_guest_init();
+        hypervisor_guest_late_init();
 
         e820__reserve_resources();
         e820__register_nosave_regions(max_low_pfn);
Add a new guest_late_init hook to the hypervisor_x86 structure. It
will replace the current kvm_guest_init() call which is changed to
make use of the new hook.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/include/asm/hypervisor.h | 11 +++++++++++
 arch/x86/include/asm/kvm_para.h   |  2 --
 arch/x86/kernel/kvm.c             |  3 ++-
 arch/x86/kernel/setup.c           |  2 +-
 4 files changed, 14 insertions(+), 4 deletions(-)