@@ -519,6 +519,7 @@ struct kvm_pmc {
 	 */
 	u64 emulated_counter;
 	u64 eventsel;
+	u64 eventsel_hw;
 	u64 msr_counter;
 	u64 msr_eventsel;
 	struct perf_event *perf_event;
@@ -1085,10 +1085,9 @@ void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu)
 	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
 		pmc = &pmu->gp_counters[i];
 		rdmsrl(pmc->msr_counter, pmc->counter);
-		rdmsrl(pmc->msr_eventsel, pmc->eventsel);
 		if (pmc->counter)
			wrmsrl(pmc->msr_counter, 0);
-		if (pmc->eventsel)
+		if (pmc->eventsel_hw)
			wrmsrl(pmc->msr_eventsel, 0);
 	}
 
@@ -1118,7 +1117,7 @@ void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu)
 	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
 		pmc = &pmu->gp_counters[i];
 		wrmsrl(pmc->msr_counter, pmc->counter);
-		wrmsrl(pmc->msr_eventsel, pmu->gp_counters[i].eventsel);
+		wrmsrl(pmc->msr_eventsel, pmu->gp_counters[i].eventsel_hw);
 	}
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
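The two hunks above change what is saved and restored across a PMU context switch: the guest-visible eventsel is now maintained purely in software at WRMSR-emulation time, so the save path no longer reads it back from the event-select MSR, and the restore path reprograms hardware from eventsel_hw instead. The following is a minimal, self-contained userspace sketch of that asymmetry; the toy_* names and the struct are invented for illustration, the "MSRs" are modeled as plain fields, and none of this is KVM code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a single general-purpose counter. */
struct toy_pmc {
	uint64_t counter;
	uint64_t eventsel;	   /* guest-visible value */
	uint64_t eventsel_hw;	   /* value actually programmed into hardware */
	uint64_t hw_counter_msr;   /* models the physical counter MSR */
	uint64_t hw_eventsel_msr;  /* models the physical event-select MSR */
};

/* Save: latch the counter, then quiesce the hardware. Note that eventsel is
 * NOT read back -- the guest-visible value is kept up to date in software. */
static void toy_save(struct toy_pmc *pmc)
{
	pmc->counter = pmc->hw_counter_msr;
	if (pmc->counter)
		pmc->hw_counter_msr = 0;
	if (pmc->eventsel_hw)
		pmc->hw_eventsel_msr = 0;
}

/* Restore: reload the counter and the *hardware* copy of the selector. */
static void toy_restore(struct toy_pmc *pmc)
{
	pmc->hw_counter_msr = pmc->counter;
	pmc->hw_eventsel_msr = pmc->eventsel_hw;
}

int main(void)
{
	struct toy_pmc pmc = { .eventsel = 0x4100c0, .eventsel_hw = 0x4100c0,
			       .hw_eventsel_msr = 0x4100c0, .hw_counter_msr = 1234 };

	toy_save(&pmc);
	printf("after save:    hw eventsel=%#llx hw counter=%llu (saved %llu)\n",
	       (unsigned long long)pmc.hw_eventsel_msr,
	       (unsigned long long)pmc.hw_counter_msr,
	       (unsigned long long)pmc.counter);
	toy_restore(&pmc);
	printf("after restore: hw eventsel=%#llx hw counter=%llu\n",
	       (unsigned long long)pmc.hw_eventsel_msr,
	       (unsigned long long)pmc.hw_counter_msr);
	return 0;
}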
@@ -399,7 +399,18 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
			if (data & reserved_bits)
				return 1;
 
-			if (data != pmc->eventsel) {
+			if (is_passthrough_pmu_enabled(vcpu)) {
+				pmc->eventsel = data;
+				if (!check_pmu_event_filter(pmc)) {
+					if (pmc->eventsel_hw &
+					    ARCH_PERFMON_EVENTSEL_ENABLE) {
+						pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+						pmc->counter = 0;
+					}
+					return 0;
+				}
+				pmc->eventsel_hw = data;
+			} else if (data != pmc->eventsel) {
				pmc->eventsel = data;
				kvm_pmu_request_counter_reprogram(pmc);
			}
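In the intel_pmu_set_msr() hunk, a write to an event selector under the passthrough PMU always updates the guest-visible pmc->eventsel, but the value is only propagated to pmc->eventsel_hw when the event passes the PMU event filter; a filtered event that was already enabled in hardware gets its enable bit cleared and its counter zeroed. Below is a small self-contained C model of that branch. The toy_* names and the stand-in filter are invented for illustration and are not the KVM implementation; only the bit position of the enable flag mirrors ARCH_PERFMON_EVENTSEL_ENABLE.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_EVENTSEL_ENABLE (1ULL << 22)	/* mirrors ARCH_PERFMON_EVENTSEL_ENABLE (bit 22) */

struct toy_pmc {
	uint64_t counter;
	uint64_t eventsel;	/* what the guest reads back */
	uint64_t eventsel_hw;	/* what may reach the hardware MSR */
};

/* Stand-in for check_pmu_event_filter(): returns true if the event is allowed.
 * The filtered event code 0xa3 is an arbitrary choice for this demo. */
static bool toy_event_allowed(uint64_t eventsel)
{
	return (eventsel & 0xff) != 0xa3;
}

/* Mirrors the passthrough branch of the diff: the guest-visible eventsel is
 * always updated, but a filtered event never makes it into eventsel_hw; if it
 * was already running, its enable bit is stripped and the counter zeroed. */
static int toy_wrmsr_eventsel(struct toy_pmc *pmc, uint64_t data)
{
	pmc->eventsel = data;
	if (!toy_event_allowed(data)) {
		if (pmc->eventsel_hw & TOY_EVENTSEL_ENABLE) {
			pmc->eventsel_hw &= ~TOY_EVENTSEL_ENABLE;
			pmc->counter = 0;
		}
		return 0;
	}
	pmc->eventsel_hw = data;
	return 0;
}

int main(void)
{
	struct toy_pmc pmc = { 0 };

	toy_wrmsr_eventsel(&pmc, TOY_EVENTSEL_ENABLE | 0xc0);	/* allowed event */
	printf("allowed:  eventsel=%#llx eventsel_hw=%#llx\n",
	       (unsigned long long)pmc.eventsel,
	       (unsigned long long)pmc.eventsel_hw);

	toy_wrmsr_eventsel(&pmc, TOY_EVENTSEL_ENABLE | 0xa3);	/* filtered event */
	printf("filtered: eventsel=%#llx eventsel_hw=%#llx counter=%llu\n",
	       (unsigned long long)pmc.eventsel,
	       (unsigned long long)pmc.eventsel_hw,
	       (unsigned long long)pmc.counter);
	return 0;
}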