Message ID | 20200923191204.8410-1-sean.j.christopherson@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: x86/mmu: Stash 'kvm' in a local variable in kvm_mmu_free_roots() | expand |
Sean Christopherson <sean.j.christopherson@intel.com> writes: > To make kvm_mmu_free_roots() a bit more readable, capture 'kvm' in a > local variable instead of doing vcpu->kvm over and over (and over). > > No functional change intended. > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> > --- > arch/x86/kvm/mmu/mmu.c | 14 +++++++------- > 1 file changed, 7 insertions(+), 7 deletions(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index 76c5826e29a2..cdc498093450 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -3603,6 +3603,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, > void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > ulong roots_to_free) > { > + struct kvm *kvm = vcpu->kvm; > int i; > LIST_HEAD(invalid_list); > bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; > @@ -3620,22 +3621,21 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > return; > } > > - spin_lock(&vcpu->kvm->mmu_lock); > + spin_lock(&kvm->mmu_lock); > > for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) > if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) > - mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, > + mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, > &invalid_list); > > if (free_active_root) { > if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && > (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { > - mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, > - &invalid_list); > + mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); > } else { > for (i = 0; i < 4; ++i) > if (mmu->pae_root[i] != 0) > - mmu_free_root_page(vcpu->kvm, > + mmu_free_root_page(kvm, > &mmu->pae_root[i], > &invalid_list); > mmu->root_hpa = INVALID_PAGE; > @@ -3643,8 +3643,8 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > mmu->root_pgd = 0; > } > > - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); > - spin_unlock(&vcpu->kvm->mmu_lock); > + kvm_mmu_commit_zap_page(kvm, &invalid_list); > + spin_unlock(&kvm->mmu_lock); > } > EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); What about kvm_mmu_get_page(), make_mmu_pages_available(), mmu_alloc_root(), kvm_mmu_sync_roots(), direct_page_fault(), kvm_mmu_pte_write() which seem to be using the same ugly pattern? :-) Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
On Thu, Sep 24, 2020 at 02:42:19PM +0200, Vitaly Kuznetsov wrote: > What about kvm_mmu_get_page(), make_mmu_pages_available(), > mmu_alloc_root(), kvm_mmu_sync_roots(), direct_page_fault(), > kvm_mmu_pte_write() which seem to be using the same ugly pattern? :-) Heh, good question. I guess only kvm_mmu_free_roots() managed to cross over the threshold from "that's ugly" to "this is ridiculous".
On 23/09/20 21:12, Sean Christopherson wrote: > To make kvm_mmu_free_roots() a bit more readable, capture 'kvm' in a > local variable instead of doing vcpu->kvm over and over (and over). > > No functional change intended. > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> > --- > arch/x86/kvm/mmu/mmu.c | 14 +++++++------- > 1 file changed, 7 insertions(+), 7 deletions(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index 76c5826e29a2..cdc498093450 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -3603,6 +3603,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, > void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > ulong roots_to_free) > { > + struct kvm *kvm = vcpu->kvm; > int i; > LIST_HEAD(invalid_list); > bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; > @@ -3620,22 +3621,21 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > return; > } > > - spin_lock(&vcpu->kvm->mmu_lock); > + spin_lock(&kvm->mmu_lock); > > for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) > if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) > - mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, > + mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, > &invalid_list); > > if (free_active_root) { > if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && > (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { > - mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, > - &invalid_list); > + mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); > } else { > for (i = 0; i < 4; ++i) > if (mmu->pae_root[i] != 0) > - mmu_free_root_page(vcpu->kvm, > + mmu_free_root_page(kvm, > &mmu->pae_root[i], > &invalid_list); > mmu->root_hpa = INVALID_PAGE; > @@ -3643,8 +3643,8 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, > mmu->root_pgd = 0; > } > > - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); > - spin_unlock(&vcpu->kvm->mmu_lock); > + kvm_mmu_commit_zap_page(kvm, &invalid_list); > + spin_unlock(&kvm->mmu_lock); > } > EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); > > Queued this one, for now. Paolo
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 76c5826e29a2..cdc498093450 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3603,6 +3603,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, ulong roots_to_free) { + struct kvm *kvm = vcpu->kvm; int i; LIST_HEAD(invalid_list); bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; @@ -3620,22 +3621,21 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return; } - spin_lock(&vcpu->kvm->mmu_lock); + spin_lock(&kvm->mmu_lock); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) - mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, + mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, &invalid_list); if (free_active_root) { if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { - mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, - &invalid_list); + mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); } else { for (i = 0; i < 4; ++i) if (mmu->pae_root[i] != 0) - mmu_free_root_page(vcpu->kvm, + mmu_free_root_page(kvm, &mmu->pae_root[i], &invalid_list); mmu->root_hpa = INVALID_PAGE; @@ -3643,8 +3643,8 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, mmu->root_pgd = 0; } - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); - spin_unlock(&vcpu->kvm->mmu_lock); + kvm_mmu_commit_zap_page(kvm, &invalid_list); + spin_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
To make kvm_mmu_free_roots() a bit more readable, capture 'kvm' in a local variable instead of doing vcpu->kvm over and over (and over). No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kvm/mmu/mmu.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)