Message ID | 1611601709-28361-11-git-send-email-olekstysh@gmail.com (mailing list archive)
---|---
State | Superseded
Series | IOREQ feature (+ virtio-mmio) on Arm
Hi Oleksandr,

On 25/01/2021 19:08, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> The IOREQ is a common feature now and these fields will be used
> on Arm as is. Move them to common struct vcpu as a part of new
> struct vcpu_io and drop duplicating "io" prefixes. Also move
> enum hvm_io_completion to xen/sched.h and remove "hvm" prefixes.
>
> This patch completely removes layering violation in the common code.
>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> Reviewed-by: Julien Grall <jgrall@amazon.com>
> Reviewed-by: Paul Durrant <paul@xen.org>
> Acked-by: Jan Beulich <jbeulich@suse.com>
> CC: Julien Grall <julien.grall@arm.com>
> [On Arm only]
> Tested-by: Wei Chen <Wei.Chen@arm.com>

I seem to have trouble running Xen on an x86 platform after this patch
is applied (see trace below).

The bisector pointed to this patch, but I can't quite figure out why it
is breaking.

Does this ring any bell to someone?

(XEN) *** Serial input to DOM0 (type 'CTRL-a' three times to switch input)
(XEN) ----[ Xen-4.15-unstable x86_64 debug=n gcov=y Tainted: C ]----
(XEN) CPU: 1
(XEN) RIP: e008:[<ffff82d04041c1c7>] x86_64/entry.S#restore_all_guest+0x7/0x145
(XEN) RFLAGS: 0000000000010002 CONTEXT: hypervisor (d0v0)
(XEN) rax: 00000000000000ff rbx: ffff83027c806000 rcx: ffff82d0406c9a80
(XEN) rdx: 0000000000000000 rsi: fffffffffffffed9 rdi: 0000000000000001
(XEN) rbp: ffff83027c887df0 rsp: ffff83027c887ef8 r8: 00000000aaa8946e
(XEN) r9: 0000000000000002 r10: ffff83027c806040 r11: ffff83027c8cc020
(XEN) r12: ffff83027c80f000 r13: ffff83027c895000 r14: 0000000000000000
(XEN) r15: 0000000000000000 cr0: 0000000080050033 cr4: 00000000003426e0
(XEN) cr3: 0000000273a2d000 cr2: 0000000000000000
(XEN) fsb: 0000000000000000 gsb: 0000000000000000 gss: 0000000000000000
(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: 0000 cs: e008
(XEN) Xen code around <ffff82d04041c1c7> (x86_64/entry.S#restore_all_guest+0x7/0x145):
(XEN) 00 48 8b 93 98 0d 00 00 <44> 8b 3a 4c 8b 8b 68 0b 00 00 ba ff 7f 00 00 48
(XEN) Xen stack trace from rsp=ffff83027c887ef8:
(XEN)    0000000000000000 0000000000000000 0000000000000000 0000000000000000
(XEN)    0000000000000000 0000000000000000 0000000000000000 0000000000000000
(XEN)    0000000000000000 0000000000000000 0000000000000000 0000000000000000
(XEN)    0000000000000000 ffffffff83a2c000 0000000000000000 0000000000000000
(XEN)    ffffffff82c9e160 000000000000e033 0000000000000200 ffffffff83a4f000
(XEN)    000000000000e02b 0000000000000000 0000000000000000 00000000ffffffff
(XEN)    0000000000000000 0000e01000000001 ffff83027c806000 000000323c1d9000
(XEN)    00000000003426e0 0000000000000000 0000000000000000 0000060100000000
(XEN)    0000000000000000
(XEN) Xen call trace:
(XEN)    [<ffff82d04041c1c7>] R x86_64/entry.S#restore_all_guest+0x7/0x145
(XEN)
(XEN) Pagetable walk from 0000000000000000:
(XEN)  L4[0x000] = 0000000000000000 ffffffffffffffff
(XEN) Xen lock profile info SHOW (now = 4770175959 total = 4770175959)
(XEN) Global xenpf_lock: addr=ffff82d04052c4a0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global rtc_lock: addr=ffff82d04052c480, lockval=00010001, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global pit_lock: addr=ffff82d04052c470, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global platform_timer_lock: addr=ffff82d04052c460, lockval=000e000e, not locked
(XEN)   lock:6(57390), block:0(0)
(XEN) Global sync_lock: addr=ffff82d04052c440, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global lock: addr=ffff82d04052c450, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global flush_lock: addr=ffff82d04052c430, lockval=00080007, cpu=0
(XEN)   lock:3(49953), block:0(0)
(XEN) Global pci_config_lock: addr=ffff82d04052c420, lockval=22702270, not locked
(XEN)   lock:14(100313), block:0(0)
(XEN) Global lapic_nmi_owner_lock: addr=ffff82d04052bff0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global map_pgdir_lock: addr=ffff82d04052bfd0, lockval=00050005, not locked
(XEN)   lock:3(386), block:0(0)
(XEN) Global vector_lock: addr=ffff82d04052bfc0, lockval=00260026, not locked
(XEN)   lock:1(383), block:0(0)
(XEN) Global irq_ratelimit_lock: addr=ffff82d04052bfb0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global lock: addr=ffff82d04052bf90, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global msix_fixmap_lock: addr=ffff82d04052bf80, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global ioapic_lock: addr=ffff82d04052bf60, lockval=003a003a, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global i8259A_lock: addr=ffff82d04052bf50, lockval=00210021, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global osvw_lock: addr=ffff82d04052bf10, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global vpmu_lock: addr=ffff82d04052bf00, lockval=00040004, not locked
(XEN)   lock:4(449), block:0(0)
(XEN) Global mtrr_mutex: addr=ffff82d04052bcf0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global set_atomicity_lock: addr=ffff82d04052bce0, lockval=00040004, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global microcode_mutex: addr=ffff82d04052bcc0, lockval=00030003, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global cmci_discover_lock: addr=ffff82d04052bc90, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global mce_logout_lock: addr=ffff82d04052bc70, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global processing_lock: addr=ffff82d04052bc50, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global pm_lock: addr=ffff82d04052bbc0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global apei_iomaps_lock: addr=ffff82d04052bbb0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global erst_lock: addr=ffff82d04052bba0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global shared_intremap_lock: addr=ffff82d04052bb20, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global console_lock: addr=ffff82d04052b7d0, lockval=01350135, not locked
(XEN)   lock:154(111298857), block:0(0)
(XEN) Global ratelimit_lock: addr=ffff82d04052b7c0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global lock: addr=ffff82d04052b7a0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global sched_free_cpu_lock: addr=ffff82d04052b770, lockval=00040004, not locked
(XEN)   lock:4(2963), block:0(0)
(XEN) Global cpupool_lock: addr=ffff82d04052b750, lockval=00030003, not locked
(XEN)   lock:3(19403), block:0(0)
(XEN) Global sysctl_lock: addr=ffff82d04052b740, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global domctl_lock: addr=ffff82d04052b730, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global pool_list_lock: addr=ffff82d04052b720, lockval=00010001, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global xenoprof_lock: addr=ffff82d04052b710, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global pmu_owner_lock: addr=ffff82d04052b700, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global vm_lock: addr=ffff82d04052b6f0, lockval=00130013, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global virtual_region_lock: addr=ffff82d04052b6e0, lockval=00010001, not locked
(XEN)   lock:1(198), block:0(0)
(XEN) Global wc_lock: addr=ffff82d04052b6d0, lockval=00020002, not locked
(XEN)   lock:1(770), block:0(0)
(XEN) Global tasklet_lock: addr=ffff82d04052b6c0, lockval=03790379, not locked
(XEN)   lock:612(52925), block:0(0)
(XEN) Global symbols_mutex: addr=ffff82d04052b6b0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global stopmachine_lock: addr=ffff82d04052b6a0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global call_lock: addr=ffff82d04052b680, lockval=00040004, not locked
(XEN)   lock:3(27688741), block:0(0)
(XEN) Global lock: addr=ffff82d04052b560, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global heap_lock: addr=ffff82d04052b540, lockval=c5d0c5d0, not locked
(XEN)   lock:16949(5481440), block:1(34420)
(XEN) Global crash_notes_lock: addr=ffff82d04052b520, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global lock: addr=ffff82d04052b500, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global global_virq_handlers_lock: addr=ffff82d04052b4f0, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global domlist_update_lock: addr=ffff82d04052b4d0, lockval=00010001, not locked
(XEN)   lock:1(105), block:0(0)
(XEN) Global debugtrace_lock: addr=ffff82d04052b4b0, lockval=00030003, not locked
(XEN)   lock:3(3327), block:0(0)
(XEN) Global accounting_lock: addr=ffff82d04052b480, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Global efi_rs_lock: addr=ffff82d04052b020, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 0 event_lock: addr=ffff83027c80f110, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 0 page_alloc_lock: addr=ffff83027c80f028, lockval=05cf05cf, not locked
(XEN)   lock:1487(3914530), block:0(0)
(XEN) Domain 0 domain_lock: addr=ffff83027c80f018, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 32767 page_alloc_lock: addr=ffff83027c875028, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 32767 domain_lock: addr=ffff83027c875018, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 32753 page_alloc_lock: addr=ffff83027c8c8028, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 32753 domain_lock: addr=ffff83027c8c8018, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) Domain 32754 page_alloc_lock: addr=ffff83027c8c9028, lockval=18001800, not locked
(XEN)   lock:6144(101574), block:0(0)
(XEN) Domain 32754 domain_lock: addr=ffff83027c8c9018, lockval=00000000, not locked
(XEN)   lock:0(0), block:0(0)
(XEN) debugtrace_dump() global buffer starting
1 cpupool_create(pool=0,sched=0)
2 Created cpupool 0 with scheduler SMP Credit Scheduler rev2 (credit2)
3 cpupool_add_domain(dom=0,pool=0) n_dom 1 rc 0
(XEN) wrap: 0
(XEN) debugtrace_dump() global buffer finished
(XEN)
(XEN) ****************************************
(XEN) Panic on CPU 1:
(XEN) FATAL PAGE FAULT
(XEN) [error_code=0000]
(XEN) Faulting linear address: 0000000000000000
(XEN) ****************************************

Cheers,
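For context, the shape of the change being bisected to is the move of the
in-flight I/O state from the x86-only struct hvm_vcpu_io into a new common
struct vcpu_io embedded in struct vcpu. A minimal sketch, reconstructed from
the commit message and the diff quoted at the end of this thread (exact
member order and all remaining members are elided, so this is not verbatim):

    /* xen/sched.h side of the change (sketch) */
    enum vio_completion {
        VIO_no_completion,
        VIO_mmio_completion,
        VIO_pio_completion,
        /* ... any arch-specific completions ... */
    };

    struct vcpu_io {
        enum vio_completion completion;  /* was hvm_vcpu_io.io_completion */
        ioreq_t             req;         /* was hvm_vcpu_io.io_req */
    };

    struct vcpu {
        /* ... */
        struct vcpu_io io;  /* common code uses v->io.req / v->io.completion */
        /* ... */
    };

The remaining MMIO-emulation fields (mmio_cache, mmio_gla, and so on) stay
x86-specific; in the diff the local pointers to them are merely renamed from
vio to hvio to free up the vio name for the new common structure.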
On 28.01.2021 14:41, Julien Grall wrote:
> Hi Oleksandr,
>
> On 25/01/2021 19:08, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>> [...]
>
> I seem to have trouble running Xen on an x86 platform after this patch
> is applied (see trace below).
>
> The bisector pointed to this patch, but I can't quite figure out why
> it is breaking.
>
> Does this ring any bell to someone?

Memory overwriting / corruption? This ...

> (XEN) RIP: e008:[<ffff82d04041c1c7>] x86_64/entry.S#restore_all_guest+0x7/0x145
> [...]
> (XEN) Xen code around <ffff82d04041c1c7> (x86_64/entry.S#restore_all_guest+0x7/0x145):
> (XEN) 00 48 8b 93 98 0d 00 00 <44> 8b 3a 4c 8b 8b 68 0b 00 00 ba ff 7f 00 00 48

... is

restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED

        /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
        mov VCPU_arch_msrs(%rbx), %rdx
        mov VCPUMSR_spec_ctrl_raw(%rdx), %r15d

i.e. Dom0/vCPU0's v->arch.msrs got zeroed in an unintended way, hence
%rdx is zero here (%rbx looks at least plausible).

I take it that you double checked this isn't an incremental build issue,
i.e. entry.o for some reason not having got rebuilt despite struct
vcpu's layout having changed?

Jan
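The reason a stale object can fail in exactly this spot: entry.S cannot use C
member accesses, so struct offsets such as VCPU_arch_msrs are baked in at
build time via the asm-offsets mechanism. A simplified sketch of how such a
constant is generated (the DEFINE/OFFSET idiom is the standard pattern; Xen's
actual macros in x86_64/asm-offsets.c may differ in detail):

    /* asm-offsets.c style generator (sketch) */
    #include <stddef.h>
    #include <xen/sched.h>   /* for struct vcpu */

    /* Emit a marker line which the build post-processes into asm-offsets.h. */
    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"==>#define " #sym " %0 <==\"" :: "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

    void __dummy__(void)
    {
        OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
        /* ... further offsets consumed by entry.S ... */
    }

The compiler's assembly output is post-processed into asm-offsets.h, which
entry.S includes. Inserting the new struct vcpu_io member shifts
offsetof(struct vcpu, arch), so any object still assembled against the old
asm-offsets.h loads VCPU_arch_msrs from a now-unrelated part of struct vcpu;
a zero %rdx followed by a NULL dereference is exactly the kind of symptom
that would produce.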
On 28.01.21 15:41, Julien Grall wrote:
> Hi Oleksandr,

Hi Julien

> On 25/01/2021 19:08, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>> [...]
>
> I seem to have trouble running Xen on an x86 platform after this patch
> is applied (see trace below).

First of all, thank you for testing! I admit I didn't physically test on
x86 (but I performed various build tests). Also, the changes in the
current patch are mostly (or even entirely) mechanical and shouldn't
change behaviour, I think; what is more, I tried to retain the existing
x86 behaviour as much as I could, not only in the current patch but
across all patches in this series.

> The bisector pointed to this patch, but I can't quite figure out why
> it is breaking.
>
> Does this ring any bell to someone?
>
> [...]
Hi Jan,

On 28/01/2021 13:53, Jan Beulich wrote:
> On 28.01.2021 14:41, Julien Grall wrote:
>> [...]
>>
>> I seem to have trouble running Xen on an x86 platform after this
>> patch is applied (see trace below).
>>
>> The bisector pointed to this patch, but I can't quite figure out why
>> it is breaking.
>>
>> Does this ring any bell to someone?
>
> Memory overwriting / corruption? This ...
>
>> [...]
>
> ... is
>
> restore_all_guest:
>         ASSERT_INTERRUPTS_DISABLED
>
>         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
>         mov VCPU_arch_msrs(%rbx), %rdx
>         mov VCPUMSR_spec_ctrl_raw(%rdx), %r15d
>
> i.e. Dom0/vCPU0's v->arch.msrs got zeroed in an unintended way, hence
> %rdx is zero here (%rbx looks at least plausible).
>
> I take it that you double checked this isn't an incremental build
> issue, i.e. entry.o for some reason not having got rebuilt despite
> struct vcpu's layout having changed?

I was going to follow up on my e-mail with more debugging information.
It seems that this is a build issue, as the error disappears if I clean
the repo.

The error happens when I move from staging to a branch with this series
applied without cleaning the tree. It also happens the other way around.

Removing entry.o or asm-offsets.h before building doesn't help. Any
other idea?

On a side note, it looks like asm-offsets.h doesn't get rebuilt when the
Kconfig changes. I noticed the issue when trying to turn on the perf
counters.

Cheers,
On 28.01.2021 15:21, Julien Grall wrote:
> On 28/01/2021 13:53, Jan Beulich wrote:
>> [...]
>>
>> I take it that you double checked this isn't an incremental build
>> issue, i.e. entry.o for some reason not having got rebuilt despite
>> struct vcpu's layout having changed?
>
> I was going to follow up on my e-mail with more debugging information.
> It seems that this is a build issue, as the error disappears if I
> clean the repo.
>
> The error happens when I move from staging to a branch with this
> series applied without cleaning the tree. It also happens the other
> way around.
>
> Removing entry.o or asm-offsets.h before building doesn't help. Any
> other idea?

No, I'd need to know how exactly to repro and then try to debug.

> On a side note, it looks like asm-offsets.h doesn't get rebuilt when
> the Kconfig changes. I noticed the issue when trying to turn on the
> perf counters.

That's bad and needs fixing. Assuming you mean the Kconfig change in
fact incurs a change to asm-offsets.h. Otherwise there might be a
move-if-changed somewhere preventing unnecessary rebuilding.

Jan
On 28/01/2021 14:36, Jan Beulich wrote:
> On 28.01.2021 15:21, Julien Grall wrote:
>> I was going to follow up on my e-mail with more debugging
>> information. It seems that this is a build issue, as the error
>> disappears if I clean the repo.
>>
>> The error happens when I move from staging to a branch with this
>> series applied without cleaning the tree. It also happens the other
>> way around.
>>
>> Removing entry.o or asm-offsets.h before building doesn't help. Any
>> other idea?
>
> No, I'd need to know how exactly to repro and then try to debug.
>
>> On a side note, it looks like asm-offsets.h doesn't get rebuilt when
>> the Kconfig changes. I noticed the issue when trying to turn on the
>> perf counters.
>
> That's bad and needs fixing. Assuming you mean the Kconfig change in
> fact incurs a change to asm-offsets.h. Otherwise there might be a
> move-if-changed somewhere preventing unnecessary rebuilding.

FYI this is the incremental build breakage I reported on IRC last week.
There is definitely a missing dependency somewhere, but I so far haven't
been able to pin down precisely what.

~Andrew
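One way such a missing dependency bites, sketched in C: the offsets generated
into asm-offsets.h can legitimately change when only a Kconfig option flips,
because struct layouts may contain CONFIG-conditional members. A contrived
layout for illustration only (the struct and member names here are
hypothetical; CONFIG_PERF_COUNTERS is the option Julien mentions turning on):

    /* Layout depends on the configuration, so the generated offsets do too. */
    struct vcpu_like {
    #ifdef CONFIG_PERF_COUNTERS
        unsigned long perf_counters[4];  /* present only in this config */
    #endif
        struct vcpu_msrs *msrs;          /* offset differs between configs */
    };

If the generated header is not regenerated, or its assembly consumers not
reassembled, after flipping the option, the assembler keeps using the stale
offset for msrs: the same failure mode as switching branches in an unclean
tree.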
Jan Beulich writes ("Re: [PATCH V5 10/22] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu"):
> On 28.01.2021 15:21, Julien Grall wrote:
>> It seems that this is a build issue, as the error disappears if I
>> clean the repo.
>>
>> The error happens when I move from staging to a branch with this
>> series applied without cleaning the tree. It also happens the other
>> way around.

How vexing.

>> Removing entry.o or asm-offsets.h before building doesn't help. Any
>> other idea?
>
> No, I'd need to know how exactly to repro and then try to debug.

IMO this problem is not a blocker for pushing this today or tomorrow.
Unless someone disagrees?

Ian.
On 28.01.2021 15:51, Ian Jackson wrote:
> Jan Beulich writes ("Re: [PATCH V5 10/22] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu"):
>> On 28.01.2021 15:21, Julien Grall wrote:
>>> It seems that this is a build issue, as the error disappears if I
>>> clean the repo.
>>>
>>> The error happens when I move from staging to a branch with this
>>> series applied without cleaning the tree. It also happens the other
>>> way around.
>
> How vexing.
>
>>> Removing entry.o or asm-offsets.h before building doesn't help. Any
>>> other idea?
>>
>> No, I'd need to know how exactly to repro and then try to debug.
>
> IMO this problem is not a blocker for pushing this today or tomorrow.
> Unless someone disagrees?

No, I don't think this is caused by this series, and Andrew's reply of
having noticed the same supports this.

Jan
Hi Jan,

On 28/01/2021 14:36, Jan Beulich wrote:
> On 28.01.2021 15:21, Julien Grall wrote:
>> [...]
>>
>> Removing entry.o or asm-offsets.h before building doesn't help. Any
>> other idea?
>
> No, I'd need to know how exactly to repro and then try to debug.

I have tried to remove all the *.o and still see the same issue. So it
may be something related to the generation of the headers.

This is not related to this series, so I am not going to spend more time
on this bug today.

The way I reproduced it was to use [1] as .config (it is a random config
from gitlab) and then switch between staging (2b4b33ffe7d6
"libs/foreignmemory: Implement on NetBSD" at the time of writing) and
the branch ioreq/ioreq_4.14_ml6 from [2].

Just in case it matters, this is on Ubuntu 18.04.5 with GCC 7.5.0.

Cheers,

[1] https://pastebin.com/S6ELa493
[2] https://github.com/otyshchenko1/xen.git
On 28/01/2021 14:54, Jan Beulich wrote:
> On 28.01.2021 15:51, Ian Jackson wrote:
>> Jan Beulich writes ("Re: [PATCH V5 10/22] xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu"):
>>>> Removing entry.o or asm-offsets.h before building doesn't help. Any
>>>> other idea?
>>> No, I'd need to know how exactly to repro and then try to debug.
>> IMO this problem is not a blocker for pushing this today or tomorrow.
>> Unless someone disagrees?
> No, I don't think this is caused by this series, and Andrew's reply of
> having noticed the same supports this.

I agree. A bug manifesting in exactly this way is already present in
staging.

~Andrew
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4d62199..21051ce 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -140,15 +140,15 @@ static const struct hvm_io_handler ioreq_server_handler = {
  */
 void hvmemul_cancel(struct vcpu *v)
 {
-    struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &v->arch.hvm.hvm_io;
 
-    vio->io_req.state = STATE_IOREQ_NONE;
-    vio->io_completion = HVMIO_no_completion;
-    vio->mmio_cache_count = 0;
-    vio->mmio_insn_bytes = 0;
-    vio->mmio_access = (struct npfec){};
-    vio->mmio_retry = false;
-    vio->g2m_ioport = NULL;
+    v->io.req.state = STATE_IOREQ_NONE;
+    v->io.completion = VIO_no_completion;
+    hvio->mmio_cache_count = 0;
+    hvio->mmio_insn_bytes = 0;
+    hvio->mmio_access = (struct npfec){};
+    hvio->mmio_retry = false;
+    hvio->g2m_ioport = NULL;
 
     hvmemul_cache_disable(v);
 }
@@ -159,7 +159,7 @@ static int hvmemul_do_io(
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &curr->io;
     ioreq_t p = {
         .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
@@ -184,13 +184,13 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    switch ( vio->io_req.state )
+    switch ( vio->req.state )
     {
     case STATE_IOREQ_NONE:
         break;
     case STATE_IORESP_READY:
-        vio->io_req.state = STATE_IOREQ_NONE;
-        p = vio->io_req;
+        vio->req.state = STATE_IOREQ_NONE;
+        p = vio->req;
 
         /* Verify the emulation request has been correctly re-issued */
         if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
@@ -238,7 +238,7 @@ static int hvmemul_do_io(
     }
 
     ASSERT(p.count);
 
-    vio->io_req = p;
+    vio->req = p;
 
     rc = hvm_io_intercept(&p);
@@ -247,12 +247,12 @@ static int hvmemul_do_io(
      * our callers and mirror this into latched state.
      */
     ASSERT(p.count <= *reps);
-    *reps = vio->io_req.count = p.count;
+    *reps = vio->req.count = p.count;
 
     switch ( rc )
     {
     case X86EMUL_OKAY:
-        vio->io_req.state = STATE_IOREQ_NONE;
+        vio->req.state = STATE_IOREQ_NONE;
         break;
     case X86EMUL_UNHANDLEABLE:
     {
@@ -305,7 +305,7 @@ static int hvmemul_do_io(
             if ( s == NULL )
             {
                 rc = X86EMUL_RETRY;
-                vio->io_req.state = STATE_IOREQ_NONE;
+                vio->req.state = STATE_IOREQ_NONE;
                 break;
             }
 
@@ -316,7 +316,7 @@ static int hvmemul_do_io(
             if ( dir == IOREQ_READ )
             {
                 rc = hvm_process_io_intercept(&ioreq_server_handler, &p);
-                vio->io_req.state = STATE_IOREQ_NONE;
+                vio->req.state = STATE_IOREQ_NONE;
                 break;
             }
         }
@@ -329,14 +329,14 @@ static int hvmemul_do_io(
         if ( !s )
         {
             rc = hvm_process_io_intercept(&null_handler, &p);
-            vio->io_req.state = STATE_IOREQ_NONE;
+            vio->req.state = STATE_IOREQ_NONE;
         }
         else
         {
             rc = hvm_send_ioreq(s, &p, 0);
             if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
-                vio->io_req.state = STATE_IOREQ_NONE;
-            else if ( !ioreq_needs_completion(&vio->io_req) )
+                vio->req.state = STATE_IOREQ_NONE;
+            else if ( !ioreq_needs_completion(&vio->req) )
                 rc = X86EMUL_OKAY;
         }
         break;
@@ -1005,14 +1005,14 @@ static int hvmemul_phys_mmio_access(
  * cache indexed by linear MMIO address.
  */
 static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
-    struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir, bool create)
+    struct hvm_vcpu_io *hvio, unsigned long gla, uint8_t dir, bool create)
 {
     unsigned int i;
     struct hvm_mmio_cache *cache;
 
-    for ( i = 0; i < vio->mmio_cache_count; i ++ )
+    for ( i = 0; i < hvio->mmio_cache_count; i ++ )
     {
-        cache = &vio->mmio_cache[i];
+        cache = &hvio->mmio_cache[i];
 
         if ( gla == cache->gla &&
              dir == cache->dir )
@@ -1022,13 +1022,13 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
     if ( !create )
         return NULL;
 
-    i = vio->mmio_cache_count;
-    if( i == ARRAY_SIZE(vio->mmio_cache) )
+    i = hvio->mmio_cache_count;
+    if( i == ARRAY_SIZE(hvio->mmio_cache) )
         return NULL;
 
-    ++vio->mmio_cache_count;
+    ++hvio->mmio_cache_count;
 
-    cache = &vio->mmio_cache[i];
+    cache = &hvio->mmio_cache[i];
     memset(cache, 0, sizeof (*cache));
 
     cache->gla = gla;
@@ -1037,26 +1037,26 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
     return cache;
 }
 
-static void latch_linear_to_phys(struct hvm_vcpu_io *vio, unsigned long gla,
+static void latch_linear_to_phys(struct hvm_vcpu_io *hvio, unsigned long gla,
                                  unsigned long gpa, bool_t write)
 {
-    if ( vio->mmio_access.gla_valid )
+    if ( hvio->mmio_access.gla_valid )
         return;
 
-    vio->mmio_gla = gla & PAGE_MASK;
-    vio->mmio_gpfn = PFN_DOWN(gpa);
-    vio->mmio_access = (struct npfec){ .gla_valid = 1,
-                                       .read_access = 1,
-                                       .write_access = write };
+    hvio->mmio_gla = gla & PAGE_MASK;
+    hvio->mmio_gpfn = PFN_DOWN(gpa);
+    hvio->mmio_access = (struct npfec){ .gla_valid = 1,
+                                        .read_access = 1,
+                                        .write_access = write };
 }
 
 static int hvmemul_linear_mmio_access(
     unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
     uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
     unsigned long offset = gla & ~PAGE_MASK;
-    struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir, true);
+    struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(hvio, gla, dir, true);
     unsigned int chunk, buffer_offset = 0;
     paddr_t gpa;
     unsigned long one_rep = 1;
@@ -1068,7 +1068,7 @@ static int hvmemul_linear_mmio_access(
     chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
 
     if ( known_gpfn )
-        gpa = pfn_to_paddr(vio->mmio_gpfn) | offset;
+        gpa = pfn_to_paddr(hvio->mmio_gpfn) | offset;
     else
     {
         rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
@@ -1076,7 +1076,7 @@ static int hvmemul_linear_mmio_access(
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        latch_linear_to_phys(vio, gla, gpa, dir == IOREQ_WRITE);
+        latch_linear_to_phys(hvio, gla, gpa, dir == IOREQ_WRITE);
     }
 
     for ( ;; )
@@ -1122,22 +1122,22 @@ static inline int hvmemul_linear_mmio_write(
 
 static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
 {
-    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+    const struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
 
     if ( pfec & PFEC_write_access )
    {
-        if ( !vio->mmio_access.write_access )
+        if ( !hvio->mmio_access.write_access )
            return false;
    }
    else if ( pfec & PFEC_insn_fetch )
    {
-        if ( !vio->mmio_access.insn_fetch )
+        if ( !hvio->mmio_access.insn_fetch )
            return false;
    }
-    else if ( !vio->mmio_access.read_access )
+    else if ( !hvio->mmio_access.read_access )
            return false;
 
-    return (vio->mmio_gla == (addr & PAGE_MASK) &&
+    return (hvio->mmio_gla == (addr & PAGE_MASK) &&
            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
 }
 
@@ -1145,7 +1145,7 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
     unsigned int offset = addr & ~PAGE_MASK;
     int rc = HVMTRANS_bad_gfn_to_mfn;
 
@@ -1167,7 +1167,7 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
      * we handle this access in the same way to guarantee completion and hence
      * clean up any interim state.
      */
-    if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_READ, false) )
+    if ( !hvmemul_find_mmio_cache(hvio, addr, IOREQ_READ, false) )
         rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
@@ -1200,7 +1200,7 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
                         uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
     unsigned int offset = addr & ~PAGE_MASK;
     int rc = HVMTRANS_bad_gfn_to_mfn;
 
@@ -1222,7 +1222,7 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
      * we handle this access in the same way to guarantee completion and hence
      * clean up any interim state.
      */
-    if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_WRITE, false) )
+    if ( !hvmemul_find_mmio_cache(hvio, addr, IOREQ_WRITE, false) )
         rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
 
     switch ( rc )
@@ -1599,7 +1599,7 @@ static int hvmemul_cmpxchg(
     struct vcpu *curr = current;
     unsigned long addr;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     int rc;
     void *mapping = NULL;
 
@@ -1625,8 +1625,8 @@ static int hvmemul_cmpxchg(
         /* Fix this in case the guest is really relying on r-m-w atomicity. */
         return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec,
                                          hvmemul_ctxt,
-                                         vio->mmio_access.write_access &&
-                                         vio->mmio_gla == (addr & PAGE_MASK));
+                                         hvio->mmio_access.write_access &&
+                                         hvio->mmio_gla == (addr & PAGE_MASK));
     }
 
     switch ( bytes )
@@ -1823,7 +1823,7 @@ static int hvmemul_rep_movs(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     unsigned long saddr, daddr, bytes;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
@@ -1846,18 +1846,18 @@ static int hvmemul_rep_movs(
     if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
-    if ( vio->mmio_access.read_access &&
-         (vio->mmio_gla == (saddr & PAGE_MASK)) &&
+    if ( hvio->mmio_access.read_access &&
+         (hvio->mmio_gla == (saddr & PAGE_MASK)) &&
         /*
          * Upon initial invocation don't truncate large batches just because
         * of a hit for the translation: Doing the guest page table walk is
         * cheaper than multiple round trips through the device model. Yet
         * when processing a response we can always re-use the translation.
         */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
            PAGE_SIZE - (saddr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
-        sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
+        sgpa = pfn_to_paddr(hvio->mmio_gpfn) | (saddr & ~PAGE_MASK);
     else
     {
         rc = hvmemul_linear_to_phys(saddr, &sgpa, bytes_per_rep, reps, pfec,
@@ -1867,13 +1867,13 @@ static int hvmemul_rep_movs(
     }
 
     bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (daddr & PAGE_MASK)) &&
+    if ( hvio->mmio_access.write_access &&
+         (hvio->mmio_gla == (daddr & PAGE_MASK)) &&
          /* See comment above. */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
           PAGE_SIZE - (daddr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
-        dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
+        dgpa = pfn_to_paddr(hvio->mmio_gpfn) | (daddr & ~PAGE_MASK);
     else
     {
         rc = hvmemul_linear_to_phys(daddr, &dgpa, bytes_per_rep, reps,
@@ -1892,14 +1892,14 @@ static int hvmemul_rep_movs(
 
     if ( sp2mt == p2m_mmio_dm )
     {
-        latch_linear_to_phys(vio, saddr, sgpa, 0);
+        latch_linear_to_phys(hvio, saddr, sgpa, 0);
         return hvmemul_do_mmio_addr(
             sgpa, reps, bytes_per_rep, IOREQ_READ, df, dgpa);
     }
 
     if ( dp2mt == p2m_mmio_dm )
     {
-        latch_linear_to_phys(vio, daddr, dgpa, 1);
+        latch_linear_to_phys(hvio, daddr, dgpa, 1);
         return hvmemul_do_mmio_addr(
             dgpa, reps, bytes_per_rep, IOREQ_WRITE, df, sgpa);
     }
@@ -1992,7 +1992,7 @@ static int hvmemul_rep_stos(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     unsigned long addr, bytes;
     paddr_t gpa;
     p2m_type_t p2mt;
@@ -2004,13 +2004,13 @@ static int hvmemul_rep_stos(
         return rc;
 
     bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) &&
+    if ( hvio->mmio_access.write_access &&
+         (hvio->mmio_gla == (addr & PAGE_MASK)) &&
          /* See respective comment in MOVS processing. */
-         (vio->io_req.state == STATE_IORESP_READY ||
+         (curr->io.req.state == STATE_IORESP_READY ||
           ((!df || *reps == 1) &&
           PAGE_SIZE - (addr & ~PAGE_MASK) >= *reps * bytes_per_rep)) )
-        gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
+        gpa = pfn_to_paddr(hvio->mmio_gpfn) | (addr & ~PAGE_MASK);
     else
     {
         uint32_t pfec = PFEC_page_present | PFEC_write_access;
@@ -2103,7 +2103,7 @@ static int hvmemul_rep_stos(
         return X86EMUL_UNHANDLEABLE;
 
     case p2m_mmio_dm:
-        latch_linear_to_phys(vio, addr, gpa, 1);
+        latch_linear_to_phys(hvio, addr, gpa, 1);
         return hvmemul_do_mmio_buffer(gpa, reps, bytes_per_rep, IOREQ_WRITE,
                                       df, p_data);
     }
@@ -2613,18 +2613,18 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
 };
 
 /*
- * Note that passing HVMIO_no_completion into this function serves as kind
+ * Note that passing VIO_no_completion into this function serves as kind
  * of (but not fully) an "auto select completion" indicator. When there's
 * no completion needed, the passed in value will be ignored in any case.
 */
 static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     const struct x86_emulate_ops *ops,
-    enum hvm_io_completion completion)
+    enum vio_completion completion)
 {
     const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
     uint32_t new_intr_shadow;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     int rc;
 
     /*
@@ -2632,45 +2632,45 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
      * untouched if it's already enabled, for re-execution to consume
      * entries populated by an earlier pass.
      */
-    if ( vio->cache->num_ents > vio->cache->max_ents )
+    if ( hvio->cache->num_ents > hvio->cache->max_ents )
     {
-        ASSERT(vio->io_req.state == STATE_IOREQ_NONE);
-        vio->cache->num_ents = 0;
+        ASSERT(curr->io.req.state == STATE_IOREQ_NONE);
+        hvio->cache->num_ents = 0;
     }
     else
-        ASSERT(vio->io_req.state == STATE_IORESP_READY);
+        ASSERT(curr->io.req.state == STATE_IORESP_READY);
 
-    hvm_emulate_init_per_insn(hvmemul_ctxt, vio->mmio_insn,
-                              vio->mmio_insn_bytes);
+    hvm_emulate_init_per_insn(hvmemul_ctxt, hvio->mmio_insn,
+                              hvio->mmio_insn_bytes);
 
-    vio->mmio_retry = 0;
+    hvio->mmio_retry = 0;
 
     rc = x86_emulate(&hvmemul_ctxt->ctxt, ops);
-    if ( rc == X86EMUL_OKAY && vio->mmio_retry )
+    if ( rc == X86EMUL_OKAY && hvio->mmio_retry )
        rc = X86EMUL_RETRY;
 
-    if ( !ioreq_needs_completion(&vio->io_req) )
-        completion = HVMIO_no_completion;
-    else if ( completion == HVMIO_no_completion )
-        completion = (vio->io_req.type != IOREQ_TYPE_PIO ||
-                      hvmemul_ctxt->is_mem_access) ? HVMIO_mmio_completion
-                                                   : HVMIO_pio_completion;
+    if ( !ioreq_needs_completion(&curr->io.req) )
+        completion = VIO_no_completion;
+    else if ( completion == VIO_no_completion )
+        completion = (curr->io.req.type != IOREQ_TYPE_PIO ||
+                      hvmemul_ctxt->is_mem_access) ?
VIO_mmio_completion + : VIO_pio_completion; - switch ( vio->io_completion = completion ) + switch ( curr->io.completion = completion ) { - case HVMIO_no_completion: - case HVMIO_pio_completion: - vio->mmio_cache_count = 0; - vio->mmio_insn_bytes = 0; - vio->mmio_access = (struct npfec){}; + case VIO_no_completion: + case VIO_pio_completion: + hvio->mmio_cache_count = 0; + hvio->mmio_insn_bytes = 0; + hvio->mmio_access = (struct npfec){}; hvmemul_cache_disable(curr); break; - case HVMIO_mmio_completion: - case HVMIO_realmode_completion: - BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf)); - vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes; - memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes); + case VIO_mmio_completion: + case VIO_realmode_completion: + BUILD_BUG_ON(sizeof(hvio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf)); + hvio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes; + memcpy(hvio->mmio_insn, hvmemul_ctxt->insn_buf, hvio->mmio_insn_bytes); break; default: @@ -2716,7 +2716,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, int hvm_emulate_one( struct hvm_emulate_ctxt *hvmemul_ctxt, - enum hvm_io_completion completion) + enum vio_completion completion) { return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion); } @@ -2754,7 +2754,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla) guest_cpu_user_regs()); ctxt.ctxt.data = &mmio_ro_ctxt; - switch ( rc = _hvm_emulate_one(&ctxt, ops, HVMIO_no_completion) ) + switch ( rc = _hvm_emulate_one(&ctxt, ops, VIO_no_completion) ) { case X86EMUL_UNHANDLEABLE: case X86EMUL_UNIMPLEMENTED: @@ -2782,28 +2782,28 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr, { case EMUL_KIND_NOWRITE: rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write, - HVMIO_no_completion); + VIO_no_completion); break; case EMUL_KIND_SET_CONTEXT_INSN: { struct vcpu *curr = current; - struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io; + struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io; - BUILD_BUG_ON(sizeof(vio->mmio_insn) != + BUILD_BUG_ON(sizeof(hvio->mmio_insn) != sizeof(curr->arch.vm_event->emul.insn.data)); - ASSERT(!vio->mmio_insn_bytes); + ASSERT(!hvio->mmio_insn_bytes); /* * Stash insn buffer into mmio buffer here instead of ctx * to avoid having to add more logic to hvm_emulate_one. 
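After this change, code such as _hvm_emulate_one() above works with two views of the same vCPU: the x86-only MMIO emulation state, which stays in the arch-specific structure, and the in-flight I/O request, which is now common. A minimal sketch of the resulting access pattern, for illustration only, using the names this patch introduces (the common fields exist only under CONFIG_IOREQ_SERVER):

    struct vcpu *curr = current;

    /* x86-only MMIO emulation state, still under arch.hvm: */
    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;

    /* In-flight I/O request and its completion kind, now common: */
    ioreq_t *req = &curr->io.req;
    enum vio_completion completion = curr->io.completion;

The second half is all that xen/common/ioreq.c needs, which is what removes the layering violation noted in the commit message.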
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bc96947..4ed929c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3800,7 +3800,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
             return;
         }
 
-    switch ( hvm_emulate_one(&ctxt, HVMIO_no_completion) )
+    switch ( hvm_emulate_one(&ctxt, VIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index ef8286b..dd733e1 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -85,7 +85,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
 
     hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
 
-    switch ( rc = hvm_emulate_one(&ctxt, HVMIO_no_completion) )
+    switch ( rc = hvm_emulate_one(&ctxt, VIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
         hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
@@ -109,20 +109,20 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
 bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                   struct npfec access)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
 
-    vio->mmio_access = access.gla_valid &&
-                       access.kind == npfec_kind_with_gla
-                       ? access : (struct npfec){};
-    vio->mmio_gla = gla & PAGE_MASK;
-    vio->mmio_gpfn = gpfn;
+    hvio->mmio_access = access.gla_valid &&
+                        access.kind == npfec_kind_with_gla
+                        ? access : (struct npfec){};
+    hvio->mmio_gla = gla & PAGE_MASK;
+    hvio->mmio_gpfn = gpfn;
 
     return handle_mmio();
 }
 
 bool handle_pio(uint16_t port, unsigned int size, int dir)
 {
     struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &curr->io;
     unsigned int data;
     int rc;
 
@@ -135,8 +135,8 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 
-    if ( ioreq_needs_completion(&vio->io_req) )
-        vio->io_completion = HVMIO_pio_completion;
+    if ( ioreq_needs_completion(&vio->req) )
+        vio->completion = VIO_pio_completion;
 
     switch ( rc )
     {
@@ -175,7 +175,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
 {
     struct vcpu *curr = current;
     const struct hvm_domain *hvm = &curr->domain->arch.hvm;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     struct g2m_ioport *g2m_ioport;
     unsigned int start, end;
 
@@ -185,7 +185,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
         end = start + g2m_ioport->np;
         if ( (p->addr >= start) && (p->addr + p->size <= end) )
         {
-            vio->g2m_ioport = g2m_ioport;
+            hvio->g2m_ioport = g2m_ioport;
             return 1;
         }
     }
@@ -196,8 +196,8 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
 static int g2m_portio_read(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size, uint64_t *data)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
-    const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
+    const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
     unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
 
     switch ( size )
@@ -221,8 +221,8 @@ static int g2m_portio_read(const struct hvm_io_handler *handler,
 static int g2m_portio_write(const struct hvm_io_handler *handler,
                             uint64_t addr, uint32_t size, uint64_t data)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
-    const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+    struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
+    const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
     unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
 
     switch ( size )
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 0cadf34..62a4b33 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -40,11 +40,11 @@ bool arch_ioreq_complete_mmio(void)
     return handle_mmio();
 }
 
-bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion)
+bool arch_vcpu_ioreq_completion(enum vio_completion completion)
 {
-    switch ( io_completion )
+    switch ( completion )
     {
-    case HVMIO_realmode_completion:
+    case VIO_realmode_completion:
     {
         struct hvm_emulate_ctxt ctxt;
 
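Since VIO_realmode_completion is the only arch-specific completion kind and is now guarded by CONFIG_X86 (see the xen/sched.h hunk below), this hook can be a plain stub on other architectures. A hypothetical minimal implementation for a port with no extra completion kinds, sketched here purely for illustration (the actual Arm version is added by a later patch of this series):

bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
    /*
     * VIO_mmio_completion and VIO_pio_completion are handled by the
     * common code, so nothing is expected to reach this point here.
     */
    ASSERT_UNREACHABLE();
    return true;
}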
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index fcfccf7..6d90630 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1266,7 +1266,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
+        if ( v->io.req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
         if ( !nv->nv_vmexit_pending && n2vmcb->exit_int_info.v )
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 768f01e..cc23afa 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -101,7 +101,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 
     perfc_incr(realmode_emulations);
 
-    rc = hvm_emulate_one(hvmemul_ctxt, HVMIO_realmode_completion);
+    rc = hvm_emulate_one(hvmemul_ctxt, VIO_realmode_completion);
 
     if ( rc == X86EMUL_UNHANDLEABLE )
     {
@@ -153,7 +153,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     struct vcpu *curr = current;
     struct hvm_emulate_ctxt hvmemul_ctxt;
     struct segment_register *sreg;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
+    struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
     unsigned long intr_info;
     unsigned int emulations = 0;
 
@@ -188,7 +188,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
 
         vmx_realmode_emulate_one(&hvmemul_ctxt);
 
-        if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
+        if ( curr->io.req.state != STATE_IOREQ_NONE || hvio->mmio_retry )
             break;
 
         /* Stop emulating unless our segment state is not safe */
@@ -202,7 +202,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( vio->io_req.state != STATE_IOREQ_NONE )
+    if ( curr->io.req.state != STATE_IOREQ_NONE )
         curr->arch.hvm.vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 84bce36..ce3ef59 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -159,7 +159,7 @@ static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
         break;
     }
 
-    p = &sv->vcpu->arch.hvm.hvm_io.io_req;
+    p = &sv->vcpu->io.req;
 
     if ( ioreq_needs_completion(p) )
         p->data = data;
@@ -171,10 +171,10 @@ static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
 bool handle_hvm_io_completion(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
+    struct vcpu_io *vio = &v->io;
     struct ioreq_server *s;
     struct ioreq_vcpu *sv;
-    enum hvm_io_completion io_completion;
+    enum vio_completion completion;
 
     if ( has_vpci(d) && vpci_process_pending(v) )
     {
@@ -186,29 +186,29 @@ bool handle_hvm_io_completion(struct vcpu *v)
     if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
         return false;
 
-    vio->io_req.state = ioreq_needs_completion(&vio->io_req) ?
+    vio->req.state = ioreq_needs_completion(&vio->req) ?
         STATE_IORESP_READY : STATE_IOREQ_NONE;
 
     msix_write_completion(v);
     vcpu_end_shutdown_deferral(v);
 
-    io_completion = vio->io_completion;
-    vio->io_completion = HVMIO_no_completion;
+    completion = vio->completion;
+    vio->completion = VIO_no_completion;
 
-    switch ( io_completion )
+    switch ( completion )
     {
-    case HVMIO_no_completion:
+    case VIO_no_completion:
         break;
 
-    case HVMIO_mmio_completion:
+    case VIO_mmio_completion:
         return arch_ioreq_complete_mmio();
 
-    case HVMIO_pio_completion:
-        return handle_pio(vio->io_req.addr, vio->io_req.size,
-                          vio->io_req.dir);
+    case VIO_pio_completion:
+        return handle_pio(vio->req.addr, vio->req.size,
+                          vio->req.dir);
 
     default:
-        return arch_vcpu_ioreq_completion(io_completion);
+        return arch_vcpu_ioreq_completion(completion);
     }
 
     return true;
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 1620cc7..610078b 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -65,7 +65,7 @@ bool __nonnull(1, 2) hvm_emulate_one_insn(
     const char *descr);
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-    enum hvm_io_completion completion);
+    enum vio_completion completion);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
     unsigned int errcode);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 6c1feda..8adf455 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -28,13 +28,6 @@
 #include <asm/mtrr.h>
 #include <public/hvm/ioreq.h>
 
-enum hvm_io_completion {
-    HVMIO_no_completion,
-    HVMIO_mmio_completion,
-    HVMIO_pio_completion,
-    HVMIO_realmode_completion
-};
-
 struct hvm_vcpu_asid {
     uint64_t generation;
     uint32_t asid;
@@ -52,10 +45,6 @@ struct hvm_mmio_cache {
 };
 
 struct hvm_vcpu_io {
-    /* I/O request in flight to device model. */
-    enum hvm_io_completion io_completion;
-    ioreq_t                io_req;
-
     /*
      * HVM emulation:
      *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index 60e864d..eace1d3 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -107,7 +107,7 @@ void hvm_ioreq_init(struct domain *d);
 int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
 
 bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion);
+bool arch_vcpu_ioreq_completion(enum vio_completion completion);
 int arch_ioreq_server_map_pages(struct ioreq_server *s);
 void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
 void arch_ioreq_server_enable(struct ioreq_server *s);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index f437ee3..59e5b6a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -147,6 +147,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */
 
 struct waitqueue_vcpu;
 
+enum vio_completion {
+    VIO_no_completion,
+    VIO_mmio_completion,
+    VIO_pio_completion,
+#ifdef CONFIG_X86
+    VIO_realmode_completion,
+#endif
+};
+
+struct vcpu_io {
+    /* I/O request in flight to device model. */
+    enum vio_completion  completion;
+    ioreq_t              req;
+};
+
 struct vcpu
 {
     int              vcpu_id;
@@ -258,6 +273,10 @@ struct vcpu
 
     struct vpci_vcpu vpci;
 
     struct arch_vcpu arch;
+
+#ifdef CONFIG_IOREQ_SERVER
+    struct vcpu_io io;
+#endif
 };
 
 struct sched_unit {
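With enum vio_completion and struct vcpu_io living in xen/sched.h, common code can now inspect a vCPU's in-flight request without reaching into arch.hvm at all. As an illustration, a check like the nestedsvm.c one above could be expressed generically; vcpu_has_pending_io() below is a hypothetical helper sketched under these assumptions, not something this patch adds:

static inline bool vcpu_has_pending_io(const struct vcpu *v)
{
#ifdef CONFIG_IOREQ_SERVER
    /* Any state other than NONE means the device model is still involved. */
    return v->io.req.state != STATE_IOREQ_NONE;
#else
    return false;
#endif
}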