Cc: eric van tassell <Eric.VanTassell@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/svm/sev.c | 24 ++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c |  3 +++
 arch/x86/kvm/svm/svm.h |  3 +++
 3 files changed, 30 insertions(+)

--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1193,3 +1193,27 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
+
+bool sev_pin_spte(struct kvm_vcpu *vcpu, gfn_t gfn, int level, kvm_pfn_t pfn)
+{
+	if (!sev_guest(vcpu->kvm))
+		return false;
+
+	get_page(pfn_to_page(pfn));
+
+	/*
+	 * Flush any cached lines of the page being added since "ownership" of
+	 * it will be transferred from the host to an encrypted guest.
+	 */
+	clflush_cache_range(__va(pfn << PAGE_SHIFT), page_level_size(level));
+
+	return true;
+}
+
+void sev_drop_pinned_spte(struct kvm *kvm, gfn_t gfn, int level, kvm_pfn_t pfn)
+{
+	if (WARN_ON_ONCE(!sev_guest(kvm)))
+		return;
+
+	put_page(pfn_to_page(pfn));
+}
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4150,6 +4150,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+	.pin_spte = sev_pin_spte,
+	.drop_pinned_spte = sev_drop_pinned_spte,
 };

 static struct kvm_x86_init_ops svm_init_ops __initdata = {
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -489,4 +489,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu);
 int __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
+bool sev_pin_spte(struct kvm_vcpu *vcpu, gfn_t gfn, int level, kvm_pfn_t pfn);
+void sev_drop_pinned_spte(struct kvm *kvm, gfn_t gfn, int level, kvm_pfn_t pfn);
+
 #endif
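
A note for readers who don't have the rest of the series handy: the two
callbacks are new kvm_x86_ops hooks that the common x86 MMU invokes when it
creates and destroys leaf SPTEs, so that SEV can keep the backing pages
pinned (and thus immovable) for the lifetime of the mapping.  Below is a
minimal sketch of how the MMU side is assumed to consume the hooks; the
member names mirror the .pin_spte/.drop_pinned_spte wiring above, but the
call site, the helper name, and the SPTE_PINNED_MASK software bit are
illustrative assumptions, not code from this patch.

/* Hypothetical MMU call site when a leaf SPTE is installed. */
static void mmu_pin_new_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			     gfn_t gfn, int level, kvm_pfn_t pfn)
{
	/*
	 * pin_spte() returns true if the backend took a reference on the
	 * page; record that in a software-available SPTE bit so the
	 * reference can be dropped via drop_pinned_spte() when the SPTE
	 * is zapped.
	 */
	if (kvm_x86_ops.pin_spte &&
	    kvm_x86_ops.pin_spte(vcpu, gfn, level, pfn))
		*sptep |= SPTE_PINNED_MASK;
}

The design choice worth calling out: sev_pin_spte() pins by elevating the
struct page refcount with get_page(), which keeps the page from being
migrated or reclaimed while the encrypted guest owns it, and the
clflush_cache_range() evicts any host-side (unencrypted) cache lines so
they cannot later be written back over data the guest has encrypted with
its key, since the CPU does not guarantee coherency between encrypted and
unencrypted mappings of the same physical page.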