@@ -86,6 +86,7 @@ KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)
KVM_X86_OP(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
+KVM_X86_OP(complete_mmu_init)
KVM_X86_OP_NULL(has_wbinvd_exit)
KVM_X86_OP(write_l1_tsc_offset)
KVM_X86_OP(get_exit_info)
@@ -1251,6 +1251,8 @@ struct kvm_x86_ops {
void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd,
int pgd_level);
+	void (*complete_mmu_init)(struct kvm_vcpu *vcpu);
+
bool (*has_wbinvd_exit)(void);
/* Returns actual tsc_offset set in active VMCS */
@@ -4774,6 +4774,8 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
init_kvm_tdp_mmu(vcpu);
else
init_kvm_softmmu(vcpu);
+
+ static_call(kvm_x86_complete_mmu_init)(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);
@@ -3913,6 +3913,11 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}
+static void svm_complete_mmu_init(struct kvm_vcpu *vcpu)
+{
+
+}
+
static int is_disabled(void)
{
u64 vm_cr;
@@ -4522,6 +4527,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.write_l1_tsc_offset = svm_write_l1_tsc_offset,
.load_mmu_pgd = svm_load_mmu_pgd,
+ .complete_mmu_init = svm_complete_mmu_init,
.check_intercept = svm_check_intercept,
.handle_exit_irqoff = svm_handle_exit_irqoff,
@@ -3252,6 +3252,11 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
vmcs_writel(GUEST_CR3, guest_cr3);
}
+static void vmx_complete_mmu_init(struct kvm_vcpu *vcpu)
+{
+
+}
+
static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
/*
@@ -7849,6 +7854,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
.load_mmu_pgd = vmx_load_mmu_pgd,
+ .complete_mmu_init = vmx_complete_mmu_init,
.check_intercept = vmx_check_intercept,
.handle_exit_irqoff = vmx_handle_exit_irqoff,
This callback will be used to tweak the mmu context in arch-specific code
after it has been reset.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 +
 arch/x86/include/asm/kvm_host.h    | 2 ++
 arch/x86/kvm/mmu/mmu.c             | 2 ++
 arch/x86/kvm/svm/svm.c             | 6 ++++++
 arch/x86/kvm/vmx/vmx.c             | 6 ++++++
 5 files changed, 17 insertions(+)
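
For illustration only, and not part of this patch: below is a minimal sketch
of the kind of arch-specific tweak the new hook is meant to enable, assuming
it runs right after kvm_init_mmu() has rebuilt the context behind
vcpu->arch.mmu. The names my_vendor_complete_mmu_init() and
my_vendor_get_guest_pgd() are made up for the example; the only pre-existing
KVM symbols relied on here are kvm_read_cr3() and the get_guest_pgd member of
struct kvm_mmu.

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"	/* kvm_read_cr3() */

/* Hypothetical replacement for the MMU's guest-PGD lookup. */
static unsigned long my_vendor_get_guest_pgd(struct kvm_vcpu *vcpu)
{
	/* Placeholder: hand back whatever root the vendor code wants. */
	return kvm_read_cr3(vcpu);
}

/* Hypothetical implementation of the new .complete_mmu_init hook. */
static void my_vendor_complete_mmu_init(struct kvm_vcpu *vcpu)
{
	/*
	 * kvm_init_mmu() has just reset vcpu->arch.mmu; adjust the fresh
	 * context here, before it is used for any guest page-table walk.
	 */
	vcpu->arch.mmu->get_guest_pgd = my_vendor_get_guest_pgd;
}

Pointing the .complete_mmu_init member of a vendor's kvm_x86_ops table at
such a function, the way the SVM and VMX hunks above wire up their (for now
empty) stubs, would make the tweak take effect on every MMU reset.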