@@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
return false;
}
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
- ASSERT_UNREACHABLE();
- return true;
-}
-
/*
* The "legacy" mechanism of mapping magic pages for the IOREQ servers
* is x86 specific, so the following hooks don't need to be implemented on Arm:
@@ -29,31 +29,6 @@ bool arch_ioreq_complete_mmio(void)
return handle_mmio();
}
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
- switch ( completion )
- {
-#ifdef CONFIG_VMX
- case VIO_realmode_completion:
- {
- struct hvm_emulate_ctxt ctxt;
-
- hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
- vmx_realmode_emulate_one(&ctxt);
- hvm_emulate_writeback(&ctxt);
-
- break;
- }
-#endif
-
- default:
- ASSERT_UNREACHABLE();
- break;
- }
-
- return true;
-}
-
static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
{
struct domain *d = s->target;
@@ -2749,6 +2749,20 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
vmx_vmcs_exit(v);
}
+bool realmode_vcpu_ioreq_completion(enum vio_completion completion)
+{
+ struct hvm_emulate_ctxt ctxt;
+
+ if ( completion != VIO_realmode_completion )
+ ASSERT_UNREACHABLE();
+
+ hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
+ vmx_realmode_emulate_one(&ctxt);
+ hvm_emulate_writeback(&ctxt);
+
+ return true;
+}
+
static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
@@ -3070,6 +3084,7 @@ const struct hvm_function_table * __init start_vmx(void)
lbr_tsx_fixup_check();
ler_to_fixup_check();
+ arch_vcpu_ioreq_completion = realmode_vcpu_ioreq_completion;
return &vmx_function_table;
}
@@ -33,6 +33,8 @@
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
+bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion) = NULL;
+
void ioreq_request_mapcache_invalidate(const struct domain *d)
{
struct vcpu *v = current;
@@ -244,7 +246,8 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
break;
default:
- res = arch_vcpu_ioreq_completion(completion);
+ if ( arch_vcpu_ioreq_completion )
+ res = arch_vcpu_ioreq_completion(completion);
break;
}
@@ -111,7 +111,7 @@ void ioreq_domain_init(struct domain *d);
int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum vio_completion completion);
+extern bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion);
int arch_ioreq_server_map_pages(struct ioreq_server *s);
void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
void arch_ioreq_server_enable(struct ioreq_server *s);
In most cases the arch_vcpu_ioreq_completion() routine is just an empty stub, except when handling VIO_realmode_completion, which only happens on HVM domains running on a VT-x machine. When VT-x is disabled in the build configuration, both the x86 and Arm versions of the routine become empty stubs. To dispose of these useless stubs we can make an optional call to the arch-specific ioreq completion handler, if one is present, and drop the Arm and generic x86 handlers. Actual handling of VIO_realmode_completion can then be done by VMX code. Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com> --- xen/arch/arm/ioreq.c | 6 ------ xen/arch/x86/hvm/ioreq.c | 25 ------------------------- xen/arch/x86/hvm/vmx/vmx.c | 15 +++++++++++++++ xen/common/ioreq.c | 5 ++++- xen/include/xen/ioreq.h | 2 +- 5 files changed, 20 insertions(+), 33 deletions(-)