@@ -1402,7 +1402,6 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 {
         struct kvm_mmu_page *sp = NULL;
         struct tdp_iter iter;
-        int ret = 0;

         rcu_read_lock();

@@ -1440,16 +1439,15 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
                         else
                                 write_lock(&kvm->mmu_lock);

-                        rcu_read_lock();
-
                         if (!sp) {
-                                ret = -ENOMEM;
                                 trace_kvm_mmu_split_huge_page(iter.gfn,
                                                               iter.old_spte,
-                                                              iter.level, ret);
-                                break;
+                                                              iter.level, -ENOMEM);
+                                return -ENOMEM;
                         }

+                        rcu_read_lock();
+
                         iter.yielded = true;
                         continue;
                 }
@@ -1472,7 +1470,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
         if (sp)
                 tdp_mmu_free_sp(sp);

-        return ret;
+        return 0;
 }
Avoid needlessly reacquiring the RCU read lock if the TDP MMU fails to
allocate a shadow page during eager page splitting. Opportunistically
drop the local variable ret as well now that it's no longer necessary.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
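
For context, below is a minimal, compilable userspace sketch of the
lock ordering on the allocation-retry path as it stands after this
patch. It is not kernel code: split_huge_pages, alloc_sp, and the
mmu_lock_*/rcu_* helpers are logging stand-ins invented for
illustration; only the control flow mirrors
tdp_mmu_split_huge_pages_root.

/* rcu_retry_sketch.c: cc -Wall -o rcu_retry_sketch rcu_retry_sketch.c */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Logging stand-ins so the lock ordering is visible when run. */
static void rcu_read_lock(void)      { puts("  rcu_read_lock()"); }
static void rcu_read_unlock(void)    { puts("  rcu_read_unlock()"); }
static void mmu_lock_drop(void)      { puts("  drop mmu_lock"); }
static void mmu_lock_reacquire(void) { puts("  reacquire mmu_lock"); }

/* Stand-in for the shadow-page allocation; fails on demand. */
static void *alloc_sp(bool fail)
{
        return fail ? NULL : malloc(1);
}

/* Models only the allocation-retry path of the split loop, post-patch. */
static int split_huge_pages(bool fail_alloc)
{
        void *sp;

        rcu_read_lock();

        /* ... iterating; a huge SPTE needs a new shadow page ... */

        rcu_read_unlock();
        mmu_lock_drop();

        sp = alloc_sp(fail_alloc);

        mmu_lock_reacquire();

        if (!sp)
                return -ENOMEM; /* bail before retaking the RCU read lock */

        rcu_read_lock();        /* success: retake it and resume the loop */

        /* ... install sp, continue iterating ... */

        rcu_read_unlock();
        free(sp);
        return 0;
}

int main(void)
{
        puts("success path:");
        printf("  -> %d\n", split_huge_pages(false));
        puts("failure path:");
        printf("  -> %d\n", split_huge_pages(true));
        return 0;
}

The early return is safe because sp is NULL at that point (so there is
nothing to free) and the RCU read lock is not held; the old code
reacquired it on the break path only to release it again immediately at
the bottom of the function. mmu_lock has already been reacquired, so
the function still returns with mmu_lock held as its callers expect.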