| Message ID | 20240801045907.4010984-28-mizhang@google.com |
|---|---|
| State | New, archived |
| Series | Mediated Passthrough vPMU 3.0 for x86 |
On 7/31/2024 9:58 PM, Mingwei Zhang wrote:
> Add one extra pmu function prototype in kvm_pmu_ops to disable PMU MSR
> interception.
>
> Signed-off-by: Mingwei Zhang <mizhang@google.com>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> Tested-by: Yongwei Ma <yongwei.ma@intel.com>
> ---
>  arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 +
>  arch/x86/kvm/cpuid.c                   | 4 ++++
>  arch/x86/kvm/pmu.c                     | 5 +++++
>  arch/x86/kvm/pmu.h                     | 2 ++
>  4 files changed, 12 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> index fd986d5146e4..1b7876dcb3c3 100644
> --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> @@ -24,6 +24,7 @@ KVM_X86_PMU_OP(is_rdpmc_passthru_allowed)
>  KVM_X86_PMU_OP_OPTIONAL(reset)
>  KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
>  KVM_X86_PMU_OP_OPTIONAL(cleanup)
> +KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
>
>  #undef KVM_X86_PMU_OP
>  #undef KVM_X86_PMU_OP_OPTIONAL
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index f2f2be5d1141..3deb79b39847 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -381,6 +381,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>  	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
>
>  	kvm_pmu_refresh(vcpu);
> +
> +	if (is_passthrough_pmu_enabled(vcpu))
> +		kvm_pmu_passthrough_pmu_msrs(vcpu);
> +
>  	vcpu->arch.cr4_guest_rsvd_bits =
>  	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
>
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 3afefe4cf6e2..bd94f2d67f5c 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -1059,3 +1059,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
>  	kfree(filter);
>  	return r;
>  }
> +
> +void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
> +{
> +	static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
> +}
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index e1af6d07b191..63f876557716 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -41,6 +41,7 @@ struct kvm_pmu_ops {
>  	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
>  	void (*cleanup)(struct kvm_vcpu *vcpu);
>  	bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);
> +	void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);

Seems after_set_cpuid() is a better name. It's more generic and reflects
the fact that the PMU needs to do something after userspace sets CPUID.
Currently the PMU needs to update the MSR interception policy, but it
may want to do more in the future.

Also, it's more consistent with the other APIs called from
kvm_vcpu_after_set_cpuid().

>
>  	const u64 EVENTSEL_EVENT;
>  	const int MAX_NR_GP_COUNTERS;
> @@ -292,6 +293,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
>  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
>  void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
>  bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu);
> +void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
>
>  bool is_vmware_backdoor_pmc(u32 pmc_idx);
>
On 10/25/2024 3:58 AM, Chen, Zide wrote:
>
> On 7/31/2024 9:58 PM, Mingwei Zhang wrote:
>> Add one extra pmu function prototype in kvm_pmu_ops to disable PMU MSR
>> interception.
>>
>> Signed-off-by: Mingwei Zhang <mizhang@google.com>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>> Tested-by: Yongwei Ma <yongwei.ma@intel.com>
>> ---
>>  arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 +
>>  arch/x86/kvm/cpuid.c                   | 4 ++++
>>  arch/x86/kvm/pmu.c                     | 5 +++++
>>  arch/x86/kvm/pmu.h                     | 2 ++
>>  4 files changed, 12 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
>> index fd986d5146e4..1b7876dcb3c3 100644
>> --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
>> +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
>> @@ -24,6 +24,7 @@ KVM_X86_PMU_OP(is_rdpmc_passthru_allowed)
>>  KVM_X86_PMU_OP_OPTIONAL(reset)
>>  KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
>>  KVM_X86_PMU_OP_OPTIONAL(cleanup)
>> +KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
>>
>>  #undef KVM_X86_PMU_OP
>>  #undef KVM_X86_PMU_OP_OPTIONAL
>> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
>> index f2f2be5d1141..3deb79b39847 100644
>> --- a/arch/x86/kvm/cpuid.c
>> +++ b/arch/x86/kvm/cpuid.c
>> @@ -381,6 +381,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>>  	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
>>
>>  	kvm_pmu_refresh(vcpu);
>> +
>> +	if (is_passthrough_pmu_enabled(vcpu))
>> +		kvm_pmu_passthrough_pmu_msrs(vcpu);
>> +
>>  	vcpu->arch.cr4_guest_rsvd_bits =
>>  	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
>>
>> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
>> index 3afefe4cf6e2..bd94f2d67f5c 100644
>> --- a/arch/x86/kvm/pmu.c
>> +++ b/arch/x86/kvm/pmu.c
>> @@ -1059,3 +1059,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
>>  	kfree(filter);
>>  	return r;
>>  }
>> +
>> +void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
>> +{
>> +	static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
>> +}
>> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
>> index e1af6d07b191..63f876557716 100644
>> --- a/arch/x86/kvm/pmu.h
>> +++ b/arch/x86/kvm/pmu.h
>> @@ -41,6 +41,7 @@ struct kvm_pmu_ops {
>>  	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
>>  	void (*cleanup)(struct kvm_vcpu *vcpu);
>>  	bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);
>> +	void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
> Seems after_set_cpuid() is a better name. It's more generic and reflects
> the fact that the PMU needs to do something after userspace sets CPUID.
> Currently the PMU needs to update the MSR interception policy, but it
> may want to do more in the future.
>
> Also, it's more consistent with the other APIs called from
> kvm_vcpu_after_set_cpuid().

Looks reasonable.

>
>>
>>  	const u64 EVENTSEL_EVENT;
>>  	const int MAX_NR_GP_COUNTERS;
>> @@ -292,6 +293,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
>>  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
>>  void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
>>  bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu);
>> +void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
>>
>>  bool is_vmware_backdoor_pmc(u32 pmc_idx);
>>
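Concretely, the rename under discussion would amount to roughly the
following. This is a hypothetical sketch of the reviewer's proposal, not
code from the posted series; the after_set_cpuid name comes from the
review comment above:

-KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
+KVM_X86_PMU_OP_OPTIONAL(after_set_cpuid)

-	void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
+	void (*after_set_cpuid)(struct kvm_vcpu *vcpu);

-void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
+void kvm_pmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
-	static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
+	static_call_cond(kvm_x86_pmu_after_set_cpuid)(vcpu);
 }

with kvm_vcpu_after_set_cpuid() in cpuid.c calling the renamed
kvm_pmu_after_set_cpuid() wrapper, mirroring the other helpers it
already invokes.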
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index fd986d5146e4..1b7876dcb3c3 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -24,6 +24,7 @@ KVM_X86_PMU_OP(is_rdpmc_passthru_allowed)
 KVM_X86_PMU_OP_OPTIONAL(reset)
 KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
 KVM_X86_PMU_OP_OPTIONAL(cleanup)
+KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)

 #undef KVM_X86_PMU_OP
 #undef KVM_X86_PMU_OP_OPTIONAL
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f2f2be5d1141..3deb79b39847 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -381,6 +381,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

 	kvm_pmu_refresh(vcpu);
+
+	if (is_passthrough_pmu_enabled(vcpu))
+		kvm_pmu_passthrough_pmu_msrs(vcpu);
+
 	vcpu->arch.cr4_guest_rsvd_bits =
 	    __cr4_reserved_bits(guest_cpuid_has, vcpu);

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 3afefe4cf6e2..bd94f2d67f5c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1059,3 +1059,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	kfree(filter);
 	return r;
 }
+
+void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
+{
+	static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
+}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index e1af6d07b191..63f876557716 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -41,6 +41,7 @@ struct kvm_pmu_ops {
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
 	void (*cleanup)(struct kvm_vcpu *vcpu);
 	bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);
+	void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);

 	const u64 EVENTSEL_EVENT;
 	const int MAX_NR_GP_COUNTERS;
@@ -292,6 +293,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
 bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu);
+void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);

 bool is_vmware_backdoor_pmc(u32 pmc_idx);
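The new op is optional and therefore a no-op until a vendor back end
provides it; the actual Intel/AMD implementations land later in the
series. As a rough, hypothetical sketch of what a VMX-side hook could
look like, assuming the existing vmx_set_intercept_for_msr() helper and
the intel_pmu_ops structure (the function body, MSR choice, and loop are
illustrative only, not the series' actual code):

/* Hypothetical sketch only -- not the implementation posted in this series. */
static void intel_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	/* Stop intercepting the guest's general-purpose counter MSRs. */
	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
					  MSR_TYPE_RW, false);
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
	/* ... existing callbacks ... */
	.passthrough_pmu_msrs = intel_passthrough_pmu_msrs,
};

Because the op is registered with KVM_X86_PMU_OP_OPTIONAL and invoked
through static_call_cond(), vendor code that does not implement it pays
no cost and needs no stub.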