Message ID | 20200103160825.19377-7-liuwe@microsoft.com
State      | Superseded
Series     | More Hyper-V infrastructure
On 03/01/2020 16:08, Wei Liu wrote:
> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
>      wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>  }
>
> +static void setup_hypercall_pcpu_arg(void)
> +{
> +    struct page_info *pg;
> +    void *mapping;
> +    unsigned int cpu = smp_processor_id();
> +
> +    pg = alloc_domheap_page(NULL, 0);
> +    if ( !pg )
> +        panic("Failed to setup hypercall input page for %u\n", cpu);
> +
> +    mapping = __map_domain_page_global(pg);
> +    if ( !mapping )
> +        panic("Failed to map hypercall input page for %u\n", cpu);

Sorry I didn't spot this before, but an always-mapped domheap page is
just alloc_xenheap_page() (give or take NUMA positioning above the 5T
boundary, which isn't used here).

~Andrew
On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
> On 03/01/2020 16:08, Wei Liu wrote:
> > @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
> >      wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> >  }
> >
> > +static void setup_hypercall_pcpu_arg(void)
> > +{
> > +    struct page_info *pg;
> > +    void *mapping;
> > +    unsigned int cpu = smp_processor_id();
> > +
> > +    pg = alloc_domheap_page(NULL, 0);
> > +    if ( !pg )
> > +        panic("Failed to setup hypercall input page for %u\n", cpu);
> > +
> > +    mapping = __map_domain_page_global(pg);
> > +    if ( !mapping )
> > +        panic("Failed to map hypercall input page for %u\n", cpu);
>
> Sorry I didn't spot this before, but an always-mapped domheap page is
> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
> boundary, which isn't used here).

I had the (wrong) impression that using domheap was preferred.

I'm fine with switching to xenheap, of course.

Wei.

>
> ~Andrew
On 03/01/2020 16:55, Wei Liu wrote:
> On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
>> On 03/01/2020 16:08, Wei Liu wrote:
>>> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
>>>      wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>>>  }
>>>
>>> +static void setup_hypercall_pcpu_arg(void)
>>> +{
>>> +    struct page_info *pg;
>>> +    void *mapping;
>>> +    unsigned int cpu = smp_processor_id();
>>> +
>>> +    pg = alloc_domheap_page(NULL, 0);
>>> +    if ( !pg )
>>> +        panic("Failed to setup hypercall input page for %u\n", cpu);
>>> +
>>> +    mapping = __map_domain_page_global(pg);
>>> +    if ( !mapping )
>>> +        panic("Failed to map hypercall input page for %u\n", cpu);
>> Sorry I didn't spot this before, but an always-mapped domheap page is
>> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
>> boundary, which isn't used here).
> I had the (wrong) impression that using domheap was preferred.
>
> I'm fine with switching to xenheap, of course.

This is a frame which Xen needs to have a mapping to in perpetuity, to
make hypercalls.

Most examples in code are a regular domheap frame which, after some
guest action, requires mapping in Xen for a period of time, or frames
which we want to have specific NUMA properties, and may be beyond the
end of the directmap.

~Andrew
On Fri, Jan 03, 2020 at 04:57:11PM +0000, Andrew Cooper wrote:
> On 03/01/2020 16:55, Wei Liu wrote:
> > On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
> >> On 03/01/2020 16:08, Wei Liu wrote:
> >>> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
> >>>      wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> >>>  }
> >>>
> >>> +static void setup_hypercall_pcpu_arg(void)
> >>> +{
> >>> +    struct page_info *pg;
> >>> +    void *mapping;
> >>> +    unsigned int cpu = smp_processor_id();
> >>> +
> >>> +    pg = alloc_domheap_page(NULL, 0);
> >>> +    if ( !pg )
> >>> +        panic("Failed to setup hypercall input page for %u\n", cpu);
> >>> +
> >>> +    mapping = __map_domain_page_global(pg);
> >>> +    if ( !mapping )
> >>> +        panic("Failed to map hypercall input page for %u\n", cpu);
> >> Sorry I didn't spot this before, but an always-mapped domheap page is
> >> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
> >> boundary, which isn't used here).
> > I had the (wrong) impression that using domheap was preferred.
> >
> > I'm fine with switching to xenheap, of course.
>
> This is a frame which Xen needs to have a mapping to in perpetuity, to
> make hypercalls.
>
> Most examples in code are a regular domheap frame which, after some
> guest action, requires mapping in Xen for a period of time, or frames
> which we want to have specific NUMA properties, and may be beyond the
> end of the directmap.

Alright. If we use xenheap here I can drop the tracking structure. I
will change that in v3.

Wei.

>
> ~Andrew
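A minimal sketch of the xenheap-based variant agreed on above, assuming
the per-CPU tracking structure is reduced to a plain pointer once the
separate mapping step goes away. This illustrates the direction of the
discussion, not the actual v3 code:

/*
 * Sketch only: alloc_xenheap_page() hands back memory that is already
 * mapped in Xen's directmap, so no __map_domain_page_global() call or
 * struct hyperv_pcpu_page bookkeeping is needed.  The machine address
 * can be recovered with virt_to_maddr() when issuing a hypercall.
 * Assumes hv_pcpu_input_arg is redeclared as a per-CPU void *.
 */
static void setup_hypercall_pcpu_arg(void)
{
    void *mapping = alloc_xenheap_page();

    if ( !mapping )
        panic("Failed to allocate hypercall input page for CPU%u\n",
              smp_processor_id());

    this_cpu(hv_pcpu_input_arg) = mapping;
}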
diff --git a/xen/arch/x86/guest/hyperv/hyperv.c b/xen/arch/x86/guest/hyperv/hyperv.c
index 381be2a68c..03027bd453 100644
--- a/xen/arch/x86/guest/hyperv/hyperv.c
+++ b/xen/arch/x86/guest/hyperv/hyperv.c
@@ -27,6 +27,7 @@
 struct ms_hyperv_info __read_mostly ms_hyperv;
 
 extern char hv_hypercall_page[];
+DEFINE_PER_CPU_READ_MOSTLY(struct hyperv_pcpu_page, hv_pcpu_input_arg);
 static const struct hypervisor_ops ops;
 
 const struct hypervisor_ops *__init hyperv_probe(void)
@@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
     wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 }
 
+static void setup_hypercall_pcpu_arg(void)
+{
+    struct page_info *pg;
+    void *mapping;
+    unsigned int cpu = smp_processor_id();
+
+    pg = alloc_domheap_page(NULL, 0);
+    if ( !pg )
+        panic("Failed to setup hypercall input page for %u\n", cpu);
+
+    mapping = __map_domain_page_global(pg);
+    if ( !mapping )
+        panic("Failed to map hypercall input page for %u\n", cpu);
+
+    this_cpu(hv_pcpu_input_arg).maddr = page_to_maddr(pg);
+    this_cpu(hv_pcpu_input_arg).mapping = mapping;
+}
+
 static void __init setup(void)
 {
     setup_hypercall_page();
+    setup_hypercall_pcpu_arg();
+}
+
+static void ap_setup(void)
+{
+    setup_hypercall_pcpu_arg();
 }
 
 static const struct hypervisor_ops ops = {
     .name = "Hyper-V",
     .setup = setup,
+    .ap_setup = ap_setup,
 };
 
 /*
diff --git a/xen/include/asm-x86/guest/hyperv.h b/xen/include/asm-x86/guest/hyperv.h
index c7a7f32bd5..83f297468f 100644
--- a/xen/include/asm-x86/guest/hyperv.h
+++ b/xen/include/asm-x86/guest/hyperv.h
@@ -51,6 +51,8 @@ static inline uint64_t hv_scale_tsc(uint64_t tsc, uint64_t scale,
 
 #ifdef CONFIG_HYPERV_GUEST
 
+#include <xen/percpu.h>
+
 #include <asm/guest/hypervisor.h>
 
 struct ms_hyperv_info {
@@ -63,6 +65,12 @@ struct ms_hyperv_info {
 };
 extern struct ms_hyperv_info ms_hyperv;
 
+struct hyperv_pcpu_page {
+    paddr_t maddr;
+    void *mapping;
+};
+DECLARE_PER_CPU(struct hyperv_pcpu_page, hv_pcpu_input_arg);
+
 const struct hypervisor_ops *hyperv_probe(void);
 
 #else
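To see why the patch records both the virtual mapping and the machine
address, consider a hypothetical consumer along these lines.
hv_do_hypercall() and its signature are assumptions for illustration;
no such caller is part of this patch:

/*
 * Hypothetical caller, for illustration only.  The input is staged in
 * the always-mapped per-CPU page via its virtual address (.mapping),
 * while the hypervisor is handed the machine address (.maddr), since
 * Hyper-V hypercalls take guest-physical addresses for their
 * parameters.  hv_do_hypercall() is an assumed helper, not defined at
 * this point in the series.
 */
static uint64_t example_hypercall(uint64_t control, const void *input,
                                  size_t size)
{
    struct hyperv_pcpu_page *pcpu = &this_cpu(hv_pcpu_input_arg);

    /* Copy the input into the permanently-mapped per-CPU page. */
    memcpy(pcpu->mapping, input, size);

    /* Issue the hypercall with the page's machine address. */
    return hv_do_hypercall(control, pcpu->maddr);
}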