Message ID | 20221220161236.555143-2-aaronlewis@google.com (mailing list archive)
---|---
State | New, archived |
Series | Introduce and test masked events
On 21/12/2022 12:12 am, Aaron Lewis wrote:
> When checking if a pmu event the guest is attempting to program should
> be filtered, only consider the event select + unit mask in that
> decision.  Use an architecture specific mask to mask out all other bits,
> including bits 35:32 on Intel.  Those bits are not part of the event
> select and should not be considered in that decision.
>
> Fixes: 66bb8a065f5a ("KVM: x86: PMU Event Filter")
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> ---
>  arch/x86/kvm/pmu.c           | 3 ++-
>  arch/x86/kvm/pmu.h           | 2 ++
>  arch/x86/kvm/svm/pmu.c       | 1 +
>  arch/x86/kvm/vmx/pmu_intel.c | 1 +
>  4 files changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 684393c22105..760a09ff65cd 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -277,7 +277,8 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
>  		goto out;
>
>  	if (pmc_is_gp(pmc)) {
> -		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
> +		key = pmc->eventsel & (kvm_pmu_ops.EVENTSEL_EVENT |
> +				       ARCH_PERFMON_EVENTSEL_UMASK);
>  		if (bsearch(&key, filter->events, filter->nevents,
>  			    sizeof(__u64), cmp_u64))
>  			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index 85ff3c0588ba..5b070c563a97 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -40,6 +40,8 @@ struct kvm_pmu_ops {
>  	void (*reset)(struct kvm_vcpu *vcpu);
>  	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
>  	void (*cleanup)(struct kvm_vcpu *vcpu);
> +
> +	const u64 EVENTSEL_EVENT;

Isn't it weird when the new thing added here is
not of the same type as the existing members ?

Doesn't "pmu->raw_event_mask" help here ?

>  };
>
>  void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> index 0e313fbae055..d3ae261d56a6 100644
> --- a/arch/x86/kvm/svm/pmu.c
> +++ b/arch/x86/kvm/svm/pmu.c
> @@ -229,4 +229,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
>  	.refresh = amd_pmu_refresh,
>  	.init = amd_pmu_init,
>  	.reset = amd_pmu_reset,
> +	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
>  };
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index e5cec07ca8d9..edf23115f2ef 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -810,4 +810,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
>  	.reset = intel_pmu_reset,
>  	.deliver_pmi = intel_pmu_deliver_pmi,
>  	.cleanup = intel_pmu_cleanup,
> +	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
>  };
> > diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> > index 85ff3c0588ba..5b070c563a97 100644
> > --- a/arch/x86/kvm/pmu.h
> > +++ b/arch/x86/kvm/pmu.h
> > @@ -40,6 +40,8 @@ struct kvm_pmu_ops {
> >  	void (*reset)(struct kvm_vcpu *vcpu);
> >  	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
> >  	void (*cleanup)(struct kvm_vcpu *vcpu);
> > +
> > +	const u64 EVENTSEL_EVENT;
>
> Isn't it weird when the new thing added here is
> not of the same type as the existing members ?
>
> Doesn't "pmu->raw_event_mask" help here ?
>

In earlier revisions I had this as a callback, but it was a little
awkward being that I really just wanted it to be a const that differed
depending on platform.  Making it a const fit more naturally when
using it than the callback approach, so despite it being different
from the others here I think it works better overall.

> >  };
> >
> >  void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
> > diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> > index 0e313fbae055..d3ae261d56a6 100644
> > --- a/arch/x86/kvm/svm/pmu.c
> > +++ b/arch/x86/kvm/svm/pmu.c
> > @@ -229,4 +229,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
> >  	.refresh = amd_pmu_refresh,
> >  	.init = amd_pmu_init,
> >  	.reset = amd_pmu_reset,
> > +	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
> >  };
> > diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> > index e5cec07ca8d9..edf23115f2ef 100644
> > --- a/arch/x86/kvm/vmx/pmu_intel.c
> > +++ b/arch/x86/kvm/vmx/pmu_intel.c
> > @@ -810,4 +810,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
> >  	.reset = intel_pmu_reset,
> >  	.deliver_pmi = intel_pmu_deliver_pmi,
> >  	.cleanup = intel_pmu_cleanup,
> > +	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
> >  };
On Wed, Dec 28, 2022, Aaron Lewis wrote:
> > > diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> > > index 85ff3c0588ba..5b070c563a97 100644
> > > --- a/arch/x86/kvm/pmu.h
> > > +++ b/arch/x86/kvm/pmu.h
> > > @@ -40,6 +40,8 @@ struct kvm_pmu_ops {
> > >  	void (*reset)(struct kvm_vcpu *vcpu);
> > >  	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
> > >  	void (*cleanup)(struct kvm_vcpu *vcpu);
> > > +
> > > +	const u64 EVENTSEL_EVENT;
> >
> > Isn't it weird when the new thing added here is
> > not of the same type as the existing members ?
> >
> > Doesn't "pmu->raw_event_mask" help here ?
> >
> In earlier revisions I had this as a callback, but it was a little
> awkward being that I really just wanted it to be a const that differed
> depending on platform.  Making it a const fit more naturally when
> using it than the callback approach, so despite it being different
> from the others here I think it works better overall.

And KVM already does similar things in kvm_x86_ops.
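
For readers outside KVM, the pattern being discussed is an ops structure that
carries per-vendor constant data next to its function pointers, with the
constant filled in by each vendor's designated initializer. Below is a
minimal, self-contained sketch of that pattern in plain C; the struct name,
field names, and mask values are stand-ins chosen for this illustration, not
the actual KVM definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative ops structure: function pointers plus a per-vendor
 * constant, mirroring the shape of the kvm_pmu_ops change above
 * (names and values here are example stand-ins).
 */
struct vendor_pmu_ops {
	void (*reset)(void);
	const uint64_t eventsel_event;	/* event-select bits for this vendor */
};

static void intel_like_reset(void) { puts("intel-like reset"); }
static void amd_like_reset(void)   { puts("amd-like reset"); }

static const struct vendor_pmu_ops intel_like_ops = {
	.reset = intel_like_reset,
	.eventsel_event = 0x00000000000000FFULL,	/* bits 7:0 */
};

static const struct vendor_pmu_ops amd_like_ops = {
	.reset = amd_like_reset,
	.eventsel_event = 0x0000000F000000FFULL,	/* bits 35:32 and 7:0 */
};

int main(void)
{
	/* The vendor is chosen once at init time, as with kvm_pmu_ops. */
	const struct vendor_pmu_ops *ops = &amd_like_ops;

	ops->reset();
	printf("event select mask: 0x%016llx\n",
	       (unsigned long long)ops->eventsel_event);
	return 0;
}

Compared with a callback that returns the mask, the constant can be read
directly wherever the ops pointer is visible, which is the "fit more
naturally" point made in the reply above.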
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 684393c22105..760a09ff65cd 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -277,7 +277,8 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 		goto out;

 	if (pmc_is_gp(pmc)) {
-		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+		key = pmc->eventsel & (kvm_pmu_ops.EVENTSEL_EVENT |
+				       ARCH_PERFMON_EVENTSEL_UMASK);
 		if (bsearch(&key, filter->events, filter->nevents,
 			    sizeof(__u64), cmp_u64))
 			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 85ff3c0588ba..5b070c563a97 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -40,6 +40,8 @@ struct kvm_pmu_ops {
 	void (*reset)(struct kvm_vcpu *vcpu);
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
 	void (*cleanup)(struct kvm_vcpu *vcpu);
+
+	const u64 EVENTSEL_EVENT;
 };

 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 0e313fbae055..d3ae261d56a6 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -229,4 +229,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.refresh = amd_pmu_refresh,
 	.init = amd_pmu_init,
 	.reset = amd_pmu_reset,
+	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
 };
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e5cec07ca8d9..edf23115f2ef 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -810,4 +810,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.reset = intel_pmu_reset,
 	.deliver_pmi = intel_pmu_deliver_pmi,
 	.cleanup = intel_pmu_cleanup,
+	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
 };
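
To make the effect of the one-line change in check_pmu_event_filter()
concrete, here is a small user-space sketch. The mask values are copied from
arch/x86/include/asm/perf_event.h as I understand them, and the guest
eventsel value is made up for illustration: with the old
AMD64_RAW_EVENT_MASK_NB, stray bits 35:32 end up in the Intel filter key,
while with the per-vendor EVENTSEL_EVENT they only survive on AMD, where they
really are part of the event select.

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from arch/x86/include/asm/perf_event.h (illustrative copy). */
#define ARCH_PERFMON_EVENTSEL_EVENT	0x00000000000000FFULL	/* bits 7:0  */
#define ARCH_PERFMON_EVENTSEL_UMASK	0x000000000000FF00ULL	/* bits 15:8 */
#define AMD64_EVENTSEL_EVENT		(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define AMD64_RAW_EVENT_MASK_NB		(AMD64_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK)

int main(void)
{
	/* Made-up guest eventsel: event 0xC0, umask 0x01, stray bits 35:32 set. */
	uint64_t eventsel = (0xFULL << 32) | 0x01C0;

	/* Old key: on Intel, bits 35:32 leak into the filter lookup key. */
	uint64_t old_key = eventsel & AMD64_RAW_EVENT_MASK_NB;

	/* New key on Intel: only event select (7:0) + unit mask (15:8). */
	uint64_t intel_key = eventsel & (ARCH_PERFMON_EVENTSEL_EVENT |
					 ARCH_PERFMON_EVENTSEL_UMASK);

	/* New key on AMD: event select spans bits 35:32 and 7:0, plus unit mask. */
	uint64_t amd_key = eventsel & (AMD64_EVENTSEL_EVENT |
				       ARCH_PERFMON_EVENTSEL_UMASK);

	printf("old key:   0x%016llx\n", (unsigned long long)old_key);
	printf("intel key: 0x%016llx\n", (unsigned long long)intel_key);
	printf("amd key:   0x%016llx\n", (unsigned long long)amd_key);
	return 0;
}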
When checking if a pmu event the guest is attempting to program should
be filtered, only consider the event select + unit mask in that
decision.  Use an architecture specific mask to mask out all other bits,
including bits 35:32 on Intel.  Those bits are not part of the event
select and should not be considered in that decision.

Fixes: 66bb8a065f5a ("KVM: x86: PMU Event Filter")
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/pmu.c           | 3 ++-
 arch/x86/kvm/pmu.h           | 2 ++
 arch/x86/kvm/svm/pmu.c       | 1 +
 arch/x86/kvm/vmx/pmu_intel.c | 1 +
 4 files changed, 6 insertions(+), 1 deletion(-)
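
For completeness, the lookup that consumes the masked key is an ordinary
binary search over the filter's sorted list of u64 values. The sketch below
is a user-space analogue of that part of check_pmu_event_filter(), not the
kernel code itself; the filter contents and the key are invented for the
example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * User-space analogue of the filter lookup: events are kept sorted and
 * the masked eventsel is looked up with a plain u64 comparison.
 */
static int cmp_u64(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa;
	uint64_t b = *(const uint64_t *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	/* Filter entries already reduced to event select + unit mask. */
	uint64_t events[] = { 0x01C0, 0x02D1, 0x003C };
	size_t nevents = sizeof(events) / sizeof(events[0]);

	/* Key built from the guest's eventsel with the per-vendor mask applied. */
	uint64_t key = 0x01C0;

	qsort(events, nevents, sizeof(uint64_t), cmp_u64);

	if (bsearch(&key, events, nevents, sizeof(uint64_t), cmp_u64))
		puts("event found in filter");
	else
		puts("event not in filter");
	return 0;
}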