diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -93,7 +93,8 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot, unsigned int pte_access,
gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
- bool can_unsync, bool host_writable, u64 *new_spte)
+ bool can_unsync, bool host_writable, u64 mt_mask,
+ u64 *new_spte)
{
int level = sp->role.level;
u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -130,8 +131,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
- spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
- kvm_is_mmio_pfn(pfn));
+ spte |= mt_mask;

if (host_writable)
spte |= shadow_host_writable_mask;
@@ -197,8 +197,12 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
u64 old_spte, bool prefetch, bool can_unsync,
bool host_writable, u64 *new_spte)
{
+ u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
+ kvm_is_mmio_pfn(pfn));
+
return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
- prefetch, can_unsync, host_writable, new_spte);
+ prefetch, can_unsync, host_writable, mt_mask,
+ new_spte);
}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -413,7 +413,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot, unsigned int pte_access,
gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
- bool can_unsync, bool host_writable, u64 *new_spte);
+ bool can_unsync, bool host_writable, u64 mt_mask,
+ u64 *new_spte);
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
In service of removing the vCPU pointer from __make_spte, factor the
memory type mask calculation out of __make_spte.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/spte.c | 12 ++++++++----
 arch/x86/kvm/mmu/spte.h |  3 ++-
 2 files changed, 10 insertions(+), 5 deletions(-)
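
A note after the diffstat: the standalone sketch below (plain C; every
name in it is illustrative rather than taken from KVM) shows the shape
of the refactor this patch applies. The caller derives the
context-dependent mask and the inner helper only consumes it, which is
what will let a follow-up drop the vcpu parameter from __make_spte.

	#include <stdint.h>
	#include <stdio.h>

	struct vcpu_ctx {
		uint64_t mt_bits;	/* stands in for the vendor mt_mask hook */
	};

	/* The inner helper takes the precomputed mask and needs no context... */
	static uint64_t __make_entry(uint64_t base, uint64_t mt_mask)
	{
		return base | mt_mask;
	}

	/* ...while a thin wrapper preserves the old context-taking interface. */
	static uint64_t make_entry(struct vcpu_ctx *ctx, uint64_t base)
	{
		uint64_t mt_mask = ctx->mt_bits;

		return __make_entry(base, mt_mask);
	}

	int main(void)
	{
		struct vcpu_ctx ctx = { .mt_bits = 0x6ULL << 3 };

		printf("%#llx\n", (unsigned long long)make_entry(&ctx, 0x1000));
		return 0;
	}

With the computation hoisted like this, a caller that has no vCPU on
hand can compute or cache the mask itself and invoke the
double-underscore helper directly.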