From patchwork Mon Jun 22 14:27:31 2009
X-Patchwork-Submitter: Gleb Natapov
X-Patchwork-Id: 31761
Date: Mon, 22 Jun 2009 17:27:31 +0300
From: Gleb Natapov
To: avi@redhat.com
Cc: kvm@vger.kernel.org
Subject: [PATCH v2] Cleanup cpu loop
Message-ID: <20090622142731.GC15865@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

Rearrange the cpu loop to be (hopefully) more readable. Put the
difference between kernel and userspace irqchip handling in one place.

Signed-off-by: Gleb Natapov
---
Gleb.
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 3105753..2d0015c 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -1751,15 +1751,9 @@ int kvm_cpu_exec(CPUState *env)
     return 0;
 }
 
-static int has_work(CPUState *env)
+static int is_cpu_stopped(CPUState *env)
 {
-    if (!vm_running || (env && env->kvm_cpu_state.stopped))
-        return 0;
-    if (kvm_irqchip_in_kernel(kvm_context))
-        return 1;
-    if (!env->halted)
-        return 1;
-    return kvm_arch_has_work(env);
+    return !vm_running || env->kvm_cpu_state.stopped;
 }
 
 static void flush_queued_work(CPUState *env)
@@ -1882,6 +1876,8 @@ static void update_regs_for_init(CPUState *env)
 #endif
 
     cpu_reset(env);
+    /* cpu_reset() clears env->halted, cpu should be halted after init */
+    env->halted = 1;
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
@@ -1925,6 +1921,16 @@ static void qemu_kvm_system_reset(void)
     resume_all_threads();
 }
 
+static void process_irqchip_events(CPUState *env)
+{
+    if (env->kvm_cpu_state.init)
+        update_regs_for_init(env);
+    if (env->kvm_cpu_state.sipi_needed)
+        update_regs_for_sipi(env);
+    if (kvm_arch_has_work(env))
+        env->halted = 0;
+}
+
 static int kvm_main_loop_cpu(CPUState *env)
 {
     setup_kernel_sigmask(env);
@@ -1940,19 +1946,17 @@ static int kvm_main_loop_cpu(CPUState *env)
     kvm_arch_load_regs(env);
 
     while (1) {
-        while (!has_work(env))
-            kvm_main_loop_wait(env, 1000);
-        if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
-            env->halted = 0;
-        if (!kvm_irqchip_in_kernel(kvm_context)) {
-            if (env->kvm_cpu_state.init)
-                update_regs_for_init(env);
-            if (env->kvm_cpu_state.sipi_needed)
-                update_regs_for_sipi(env);
+        int run_cpu = !is_cpu_stopped(env);
+        if (run_cpu && !kvm_irqchip_in_kernel(kvm_context)) {
+            process_irqchip_events(env);
+            run_cpu = !env->halted;
         }
-        if (!env->halted || kvm_irqchip_in_kernel(kvm_context))
-            kvm_cpu_exec(env);
-        kvm_main_loop_wait(env, 0);
+        if (run_cpu) {
+            kvm_main_loop_wait(env, 0);
+            kvm_cpu_exec(env);
+        } else {
+            kvm_main_loop_wait(env, 1000);
+        }
     }
     pthread_mutex_unlock(&qemu_mutex);
     return 0;
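
For reference, the resulting per-vcpu loop, pieced together from the hunks
above (condensed; setup and the mutex handling around the loop are omitted,
and the comments are my reading of the code, not part of the patch):

    /* Shape of kvm_main_loop_cpu() after this patch. */
    while (1) {
        /* Sleep if the VM is not running or this vcpu has been stopped. */
        int run_cpu = !is_cpu_stopped(env);

        if (run_cpu && !kvm_irqchip_in_kernel(kvm_context)) {
            /* Userspace irqchip only: emulate pending INIT/SIPI and wake a
             * halted vcpu when kvm_arch_has_work() reports pending work. */
            process_irqchip_events(env);
            run_cpu = !env->halted;
        }

        if (run_cpu) {
            kvm_main_loop_wait(env, 0);     /* handle queued work, no blocking */
            kvm_cpu_exec(env);              /* enter the guest */
        } else {
            kvm_main_loop_wait(env, 1000);  /* idle: wait up to 1000 ms */
        }
    }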