| Message ID | 1364487515-6454-2-git-send-email-pbonzini@redhat.com (mailing list archive) |
|---|---|
| State | New, archived |
On Thu, Mar 28, 2013 at 05:18:35PM +0100, Paolo Bonzini wrote:
> In order to migrate the PMU state correctly, we need to restore the
> values of MSR_CORE_PERF_GLOBAL_STATUS (a read-only register) and
> MSR_CORE_PERF_GLOBAL_OVF_CTRL (which has side effects when written).
> We also need to write the full 40-bit value of the performance counter,
> which would only be possible with a v3 architectural PMU's
> full-width counter MSRs.
>
> To distinguish host-initiated writes from the guest's, pass the
> full struct msr_data to kvm_pmu_set_msr.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Applied, thanks.

> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/pmu.c              | 14 +++++++++++---
>  arch/x86/kvm/x86.c              |  4 ++--
>  3 files changed, 14 insertions(+), 6 deletions(-)
[...]

--
	Gleb.
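For context, the restore path the commit message describes is driven from userspace: MSR writes issued through the KVM_SET_MSRS vCPU ioctl carry host_initiated semantics, which is what lets them reach the otherwise read-only or side-effecting cases handled in the patch. The following is a minimal sketch of such a restore, not part of the patch; it assumes vcpu_fd is an already-created KVM vCPU file descriptor and that the three values were captured earlier with KVM_GET_MSRS on the migration source.

/*
 * Hypothetical userspace helper: replay saved PMU MSRs into a vCPU.
 * Writes done via KVM_SET_MSRS are host-initiated from KVM's point of
 * view, so they may set MSR_CORE_PERF_GLOBAL_STATUS (read-only to the
 * guest), write MSR_CORE_PERF_GLOBAL_OVF_CTRL without clearing status
 * bits, and store a full-width counter value.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Architectural MSR numbers (from the SDM / arch/x86/include/asm/msr-index.h). */
#define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
#define MSR_IA32_PERFCTR0		0x000000c1

static int restore_pmu_msrs(int vcpu_fd, uint64_t global_status,
			    uint64_t ovf_ctrl, uint64_t pmc0)
{
	struct kvm_msrs *msrs;
	int ret;

	/* struct kvm_msrs ends in a flexible array of kvm_msr_entry. */
	msrs = calloc(1, sizeof(*msrs) + 3 * sizeof(struct kvm_msr_entry));
	if (!msrs)
		return -1;

	msrs->nmsrs = 3;
	msrs->entries[0].index = MSR_CORE_PERF_GLOBAL_STATUS;
	msrs->entries[0].data  = global_status;
	msrs->entries[1].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
	msrs->entries[1].data  = ovf_ctrl;
	msrs->entries[2].index = MSR_IA32_PERFCTR0;
	msrs->entries[2].data  = pmc0;		/* full 40-bit count, no sign extension */

	/* KVM_SET_MSRS returns the number of MSRs successfully processed. */
	ret = ioctl(vcpu_fd, KVM_SET_MSRS, msrs);
	free(msrs);
	return ret == 3 ? 0 : -1;
}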
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 36fba01..e2e09f3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1029,7 +1029,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cfc258a..c53e797 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -360,10 +360,12 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 	return 1;
 }
 
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
+	u32 index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (index) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -375,6 +377,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+		if (msr_info->host_initiated) {
+			pmu->global_status = data;
+			return 0;
+		}
 		break; /* RO MSR */
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (pmu->global_ctrl == data)
@@ -386,7 +392,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
-			pmu->global_status &= ~data;
+			if (!msr_info->host_initiated)
+				pmu->global_status &= ~data;
 			pmu->global_ovf_ctrl = data;
 			return 0;
 		}
@@ -394,7 +401,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	default:
 		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, index))) {
-			data = (s64)(s32)data;
+			if (!msr_info->host_initiated)
+				data = (s64)(s32)data;
 			pmc->counter += data - read_pmc(pmc);
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3e0a8ba..1d928af 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2042,7 +2042,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
 		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_set_msr(vcpu, msr, data);
+			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2088,7 +2088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
 		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_set_msr(vcpu, msr, data);
+			return kvm_pmu_set_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
 			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
 				    msr, data);
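A side note on the default case in the pmu.c hunk: a guest WRMSR to a legacy counter MSR only carries a sign-extended 32-bit value, so the (s64)(s32) cast is kept for guest writes but skipped for host-initiated ones, which need to put back the full 40-bit count saved on the source. Below is a standalone illustration of that difference; it is not kernel code, and the 40-bit width is simply the counter width the commit message mentions.

/*
 * Toy model of the two write paths for a performance counter MSR.
 * Guest writes see only a sign-extended 32-bit value; host-initiated
 * writes keep every bit, so a large saved count survives migration.
 */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_MASK	((1ull << 40) - 1)	/* 40-bit architectural counter */

static uint64_t write_counter(uint64_t data, int host_initiated)
{
	if (!host_initiated)
		data = (uint64_t)(int64_t)(int32_t)data;	/* legacy WRMSR semantics */
	return data & COUNTER_MASK;
}

int main(void)
{
	uint64_t saved = 0x23456789abull;	/* counter value captured at save time */

	/* Guest-style write loses bits above 31: prints 0x456789ab. */
	printf("guest write -> %#llx\n", (unsigned long long)write_counter(saved, 0));
	/* Host-initiated write keeps the full count: prints 0x23456789ab. */
	printf("host write  -> %#llx\n", (unsigned long long)write_counter(saved, 1));
	return 0;
}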