@@ -151,6 +151,8 @@ KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(get_untagged_addr)
 KVM_X86_OP_OPTIONAL_RET0(gmem_max_level)
+KVM_X86_OP_OPTIONAL(pre_memory_mapping);
+KVM_X86_OP_OPTIONAL(post_memory_mapping);
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
@@ -1839,6 +1839,11 @@ struct kvm_x86_ops {
 	int (*gmem_max_level)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn,
 			      bool is_private, u8 *max_level);
+	int (*pre_memory_mapping)(struct kvm_vcpu *vcpu,
+				  struct kvm_memory_mapping *mapping,
+				  u64 *error_code, u8 *max_level);
+	void (*post_memory_mapping)(struct kvm_vcpu *vcpu,
+				    struct kvm_memory_mapping *mapping);
 };
 
 struct kvm_x86_nested_ops {
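
For illustration only, not part of this patch: a vendor module could plug into the two new optional hooks roughly as sketched below. The function names, the 4K-level policy and the wiring into a vendor kvm_x86_ops instance are assumptions, not code from this series.

/* Hypothetical vendor-side sketch; names and policy are illustrative only. */
static int example_pre_memory_mapping(struct kvm_vcpu *vcpu,
				      struct kvm_memory_mapping *mapping,
				      u64 *error_code, u8 *max_level)
{
	/*
	 * A backend that consumes mapping->source for initial contents
	 * might, for instance, force 4K mappings so each page can be
	 * handled individually.  Returning non-zero aborts the mapping.
	 */
	if (mapping->source)
		*max_level = PG_LEVEL_4K;
	return 0;
}

static void example_post_memory_mapping(struct kvm_vcpu *vcpu,
					struct kvm_memory_mapping *mapping)
{
	/* Backend-specific follow-up after the TDP mapping attempt. */
}

/* Registered from the vendor's kvm_x86_ops definition: */
	.pre_memory_mapping = example_pre_memory_mapping,
	.post_memory_mapping = example_post_memory_mapping,
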
@@ -5826,10 +5826,20 @@ int kvm_arch_vcpu_memory_mapping(struct kvm_vcpu *vcpu,
 	u8 max_level = KVM_MAX_HUGEPAGE_LEVEL;
 	u64 error_code = PFERR_WRITE_MASK;
 	u8 goal_level = PG_LEVEL_4K;
-	int r;
+	int r = 0;
+
+	if (kvm_x86_ops.pre_memory_mapping)
+		r = static_call(kvm_x86_pre_memory_mapping)(vcpu, mapping,
+							     &error_code, &max_level);
+	else if (mapping->source)
+		r = -EINVAL;
+	if (r)
+		return r;
 
 	r = kvm_mmu_map_tdp_page(vcpu, gfn_to_gpa(mapping->base_gfn), error_code,
 				 max_level, &goal_level);
+	if (kvm_x86_ops.post_memory_mapping)
+		static_call(kvm_x86_post_memory_mapping)(vcpu, mapping);
 	if (r)
 		return r;
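
For reference, the shape of struct kvm_memory_mapping assumed by the code above can be sketched from the two fields it actually touches (base_gfn and source); the real definition in the series may carry additional members, so treat this as an assumption rather than the authoritative layout.

/* Assumed minimal layout of struct kvm_memory_mapping (illustrative only). */
struct kvm_memory_mapping {
	__u64 base_gfn;	/* first guest frame to map; converted via gfn_to_gpa() above */
	__u64 source;	/* optional source buffer; zero when the backend takes none */
};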