@@ -428,7 +428,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
struct kvm_s2_mmu *mmu;
int *last_ran;
@@ -2193,7 +2193,7 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
*vcpu_pc(vcpu) = elr;
*vcpu_cpsr(vcpu) = spsr;
- kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ kvm_arch_vcpu_load(vcpu, smp_processor_id(), false);
preempt_enable();
}
@@ -2274,7 +2274,7 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
*/
__kvm_adjust_pc(vcpu);
- kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ kvm_arch_vcpu_load(vcpu, smp_processor_id(), false);
preempt_enable();
return 1;
@@ -262,7 +262,7 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_reset(vcpu);
if (loaded)
- kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ kvm_arch_vcpu_load(vcpu, smp_processor_id(), false);
preempt_enable();
}
@@ -1050,7 +1050,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
return 0;
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
unsigned long flags;
@@ -682,7 +682,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
}
/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
unsigned long flags;
@@ -826,7 +826,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
#ifdef CONFIG_BOOKE
/*
@@ -87,7 +87,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset the guest CSRs for hotplug usecase */
if (loaded)
- kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ kvm_arch_vcpu_load(vcpu, smp_processor_id(), false);
put_cpu();
}
@@ -507,7 +507,7 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
}
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
@@ -3713,7 +3713,7 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
return value;
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
gmap_enable(vcpu->arch.enabled_gmap);
@@ -5003,7 +5003,7 @@ static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
{
/* Address WBINVD may be executed by guest */
if (need_emulate_wbinvd(vcpu)) {
@@ -1498,7 +1498,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
@@ -211,7 +211,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_running_vcpu, vcpu);
preempt_notifier_register(&vcpu->preempt_notifier);
- kvm_arch_vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu, false);
put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);
@@ -6279,7 +6279,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
__this_cpu_write(kvm_running_vcpu, vcpu);
kvm_arch_sched_in(vcpu, cpu);
- kvm_arch_vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu, true);
}
static void kvm_sched_out(struct preempt_notifier *pn,
Add a @sched_in flag to kvm_arch_vcpu_load() to note that the vCPU is being
(re)loaded by kvm_sched_in(), i.e. after the vCPU was previously scheduled
out.  KVM x86 currently uses a dedicated kvm_arch_sched_in() hook, but that's
unnecessarily brittle as the behavior of the arch hook heavily depends on the
arbitrary order of the two arch calls.

A separate hook also makes it unnecessarily difficult to do something unique
when re-loading a vCPU during kvm_sched_in(), e.g. to optimize vCPU loading
if KVM knows that some CPU state couldn't have changed while the vCPU was
scheduled out.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/arm64/kvm/arm.c            | 2 +-
 arch/arm64/kvm/emulate-nested.c | 4 ++--
 arch/arm64/kvm/reset.c          | 2 +-
 arch/loongarch/kvm/vcpu.c       | 2 +-
 arch/mips/kvm/mmu.c             | 2 +-
 arch/powerpc/kvm/powerpc.c      | 2 +-
 arch/riscv/kvm/vcpu.c           | 4 ++--
 arch/s390/kvm/kvm-s390.c        | 2 +-
 arch/x86/kvm/x86.c              | 2 +-
 include/linux/kvm_host.h        | 2 +-
 virt/kvm/kvm_main.c             | 4 ++--
 11 files changed, 14 insertions(+), 14 deletions(-)
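
Not part of the patch itself, but as an illustration of what the @sched_in
flag enables: an architecture could gate work that is only needed after a
preemption on @sched_in, rather than relying on the relative ordering of
kvm_arch_sched_in() and kvm_arch_vcpu_load().  The sketch below is
hypothetical; arch_refresh_after_preemption() and arch_load_guest_state()
are placeholder names for illustration, not real KVM helpers.

	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool sched_in)
	{
		if (sched_in) {
			/*
			 * The vCPU was previously scheduled out, so state
			 * that can only change across a preemption, e.g.
			 * which physical CPU the vCPU lands on, needs to be
			 * refreshed here.  (Placeholder helper.)
			 */
			arch_refresh_after_preemption(vcpu, cpu);
		}

		/*
		 * Work common to every load, e.g. a plain vcpu_load() from
		 * an ioctl or an in-place reload.  (Placeholder helper.)
		 */
		arch_load_guest_state(vcpu, cpu);
	}

With this shape, the sched-in-only work is explicitly tied to the load path
instead of depending on kvm_sched_in() invoking a separate hook first, and
the callers that reload a vCPU in place (emulated nested ERET, nested
injection, vCPU reset, vcpu_load()) simply pass false.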