
[RFC,10/15] KVM: x86/mmu: Abstract need_resched logic from tdp_mmu_iter_cond_resched

Message ID: 20211119235759.1304274-11-dmatlack@google.com
State: New, archived
Series: KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Nov. 19, 2021, 11:57 p.m. UTC
Abstract out the logic that checks whether or not we should reschedule
(including the extra check that ensures we make forward progress) into a
helper function. This will be used in a follow-up commit to reschedule
during large page splitting.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
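To illustrate the intended follow-up use, here is a minimal sketch of how a
splitting walk could consult the new helper before committing to expensive
per-SPTE work. Only tdp_mmu_iter_need_resched() and
tdp_mmu_iter_cond_resched() come from this series; the example_*() names are
hypothetical stand-ins, not real KVM functions.

/*
 * Hypothetical sketch, not part of this patch: check the yield
 * condition before starting work (e.g. allocating a shadow page for
 * a split) that a yield would render wasted.
 */
static bool example_try_split(struct kvm *kvm, struct tdp_iter *iter,
			      bool flush, bool shared)
{
	/*
	 * tdp_mmu_iter_cond_resched() repeats this check internally;
	 * querying it first simply lets the caller skip the expensive
	 * work when the walk is about to yield anyway.
	 */
	if (tdp_mmu_iter_need_resched(kvm, iter)) {
		tdp_mmu_iter_cond_resched(kvm, iter, flush, shared);
		return false;
	}

	return example_split_one(kvm, iter);	/* hypothetical split */
}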

Comments

Ben Gardon Nov. 22, 2021, 6:56 p.m. UTC | #1
On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
>
> Abstract out the logic that checks whether or not we should reschedule
> (including the extra check that ensures we make forward progress) into a
> helper function. This will be used in a follow-up commit to reschedule
> during large page splitting.
>
> No functional change intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>


> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 15 ++++++++++-----
>  1 file changed, 10 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index f8c4337f1fcf..2221e074d8ea 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -645,6 +645,15 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
>         for_each_tdp_pte(_iter, __va(_mmu->root_hpa),           \
>                          _mmu->shadow_root_level, _start, _end)
>
> +static inline bool tdp_mmu_iter_need_resched(struct kvm *kvm, struct tdp_iter *iter)
> +{
> +       /* Ensure forward progress has been made before yielding. */
> +       if (iter->next_last_level_gfn == iter->yielded_gfn)
> +               return false;
> +
> +       return need_resched() || rwlock_needbreak(&kvm->mmu_lock);
> +}
> +
>  /*
>   * Yield if the MMU lock is contended or this thread needs to return control
>   * to the scheduler.
> @@ -664,11 +673,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
>                                              struct tdp_iter *iter, bool flush,
>                                              bool shared)
>  {
> -       /* Ensure forward progress has been made before yielding. */
> -       if (iter->next_last_level_gfn == iter->yielded_gfn)
> -               return false;
> -
> -       if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
> +       if (tdp_mmu_iter_need_resched(kvm, iter)) {
>                 rcu_read_unlock();
>
>                 if (flush)
> --
> 2.34.0.rc2.393.gf8c9666880-goog
>

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index f8c4337f1fcf..2221e074d8ea 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -645,6 +645,15 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
 			 _mmu->shadow_root_level, _start, _end)
 
+static inline bool tdp_mmu_iter_need_resched(struct kvm *kvm, struct tdp_iter *iter)
+{
+	/* Ensure forward progress has been made before yielding. */
+	if (iter->next_last_level_gfn == iter->yielded_gfn)
+		return false;
+
+	return need_resched() || rwlock_needbreak(&kvm->mmu_lock);
+}
+
 /*
  * Yield if the MMU lock is contended or this thread needs to return control
  * to the scheduler.
@@ -664,11 +673,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 					     struct tdp_iter *iter, bool flush,
 					     bool shared)
 {
-	/* Ensure forward progress has been made before yielding. */
-	if (iter->next_last_level_gfn == iter->yielded_gfn)
-		return false;
-
-	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+	if (tdp_mmu_iter_need_resched(kvm, iter)) {
 		rcu_read_unlock();
 
 		if (flush)
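For reference, with this patch applied tdp_mmu_iter_cond_resched() takes the
shape below. The diff context above ends at the "if (flush)" line, so
everything past that point is an approximation of the surrounding upstream
code of the time, not text carried by this patch.

static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	if (tdp_mmu_iter_need_resched(kvm, iter)) {
		rcu_read_unlock();

		/* Body below approximated from upstream, not in the diff. */
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		/* Restart the walk; this also records the new yielded_gfn. */
		tdp_iter_restart(iter);

		return true;
	}

	return false;
}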