From patchwork Mon Mar 1 18:10:31 2010
X-Patchwork-Submitter: Jan Kiszka
X-Patchwork-Id: 83020
From: Jan Kiszka
To: Avi Kivity, Marcelo Tosatti
Cc: kvm@vger.kernel.org, qemu-devel@nongnu.org
Subject: [PATCH 3/4] KVM: x86: Restrict writeback of VCPU state
Date: Mon, 1 Mar 2010 19:10:31 +0100
X-Mailer: git-send-email 1.6.0.2

diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index a4767b2..0ac4391 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -545,7 +545,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }
 
-static int kvm_put_msrs(CPUState *env)
+static int kvm_put_msrs(CPUState *env, int level)
 {
     struct {
         struct kvm_msrs info;
@@ -559,7 +559,6 @@ static int kvm_put_msrs(CPUState *env)
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
     if (kvm_has_msr_star(env))
         kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
 #ifdef TARGET_X86_64
     /* FIXME if lm capable */
     kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
@@ -567,8 +566,12 @@ static int kvm_put_msrs(CPUState *env)
     kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
     kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
 #endif
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr);
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+    if (level == KVM_PUT_FULL_STATE) {
+        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
+                          env->system_time_msr);
+        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+    }
 
     msr_data.info.nmsrs = n;
 
@@ -781,7 +784,7 @@ static int kvm_get_mp_state(CPUState *env)
     return 0;
 }
 
-static int kvm_put_vcpu_events(CPUState *env)
+static int kvm_put_vcpu_events(CPUState *env, int level)
 {
 #ifdef KVM_CAP_VCPU_EVENTS
     struct kvm_vcpu_events events;
@@ -805,8 +808,11 @@ static int kvm_put_vcpu_events(CPUState *env)
 
     events.sipi_vector = env->sipi_vector;
 
-    events.flags =
-        KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+    events.flags = 0;
+    if (level >= KVM_PUT_RESET_STATE) {
+        events.flags |=
+            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+    }
 
     return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
 #else
@@ -898,15 +904,17 @@ int kvm_arch_put_registers(CPUState *env, int level)
     if (ret < 0)
         return ret;
 
-    ret = kvm_put_msrs(env);
+    ret = kvm_put_msrs(env, level);
     if (ret < 0)
         return ret;
 
-    ret = kvm_put_mp_state(env);
-    if (ret < 0)
-        return ret;
+    if (level >= KVM_PUT_RESET_STATE) {
+        ret = kvm_put_mp_state(env);
+        if (ret < 0)
+            return ret;
+    }
 
-    ret = kvm_put_vcpu_events(env);
+    ret = kvm_put_vcpu_events(env, level);
     if (ret < 0)
         return ret;
 
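
For context, every write-back above is keyed on the level argument that kvm_arch_put_registers() receives: general registers are pushed on every synchronization, mp_state and the NMI/SIPI event flags only from KVM_PUT_RESET_STATE upward, and the TSC plus kvmclock MSRs only on KVM_PUT_FULL_STATE, which appears intended to keep routine runtime syncs from overwriting clock and event state. The standalone C sketch below only illustrates that gating pattern; the enum values and the toy vcpu_state struct are assumptions chosen for demonstration, not the QEMU definitions.

/*
 * Minimal sketch, not QEMU code: demonstrates level-gated write-back
 * in the style of the patched kvm_arch_put_registers().
 */
#include <stdio.h>

enum {
    KVM_PUT_RUNTIME_STATE = 1, /* periodic sync while the guest runs */
    KVM_PUT_RESET_STATE   = 2, /* sync after a VCPU/system reset */
    KVM_PUT_FULL_STATE    = 3  /* sync after migration or state load */
};

struct vcpu_state {
    unsigned long long tsc;    /* clock state: full sync only */
    int mp_state;              /* reset level and above */
    int nmi_pending;           /* reset level and above */
    int sipi_vector;           /* reset level and above */
};

/* Write back only the state classes permitted by 'level'. */
static void put_registers(const struct vcpu_state *s, int level)
{
    printf("writing general-purpose registers\n");   /* every level */

    if (level >= KVM_PUT_RESET_STATE) {
        printf("writing mp_state=%d nmi_pending=%d sipi_vector=%d\n",
               s->mp_state, s->nmi_pending, s->sipi_vector);
    }
    if (level == KVM_PUT_FULL_STATE) {
        printf("writing TSC=%llu and kvmclock MSRs\n", s->tsc);
    }
}

int main(void)
{
    struct vcpu_state s = { 123456789ULL, 0, 0, 0 };

    put_registers(&s, KVM_PUT_RUNTIME_STATE); /* skips clocks and events */
    put_registers(&s, KVM_PUT_FULL_STATE);    /* writes everything */
    return 0;
}

At KVM_PUT_RUNTIME_STATE only the always-written registers are pushed; the reset-only and full-only branches stay idle, mirroring how the patch restricts which VCPU state reaches the kernel during ordinary synchronization.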