From patchwork Wed Apr 22 14:58:55 2009
X-Patchwork-Submitter: alex
X-Patchwork-Id: 19387
Date: Wed, 22 Apr 2009 22:58:55 +0800
Message-ID: <820ac2e90904220758m5dc9dd0dm98835d9761e09b7f@mail.gmail.com>
Subject: patch for virtual machine oriented scheduling(5)
From: alex
To: avi@redhat.com, anthony@codemonkey.ws, kvm@vger.kernel.org
X-Mailing-List: kvm@vger.kernel.org

Changes to the standard KVM files:
--------------------------------------------------------------------------------------------

 		else {
@@ -3297,13 +3303,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			kvm_run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.signal_exits;
 		}
-		if (need_resched()) {
-			up_read(&vcpu->kvm->slots_lock);
-			kvm_resched(vcpu);
-			down_read(&vcpu->kvm->slots_lock);
-		}
 	}
 
+	set_bit(_VPF_blocked, &vcpu->pause_flags);
 	up_read(&vcpu->kvm->slots_lock);
 	post_kvm_run_save(vcpu, kvm_run);
 
@@ -4382,16 +4384,17 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		if (kvm->vcpus[i])
-			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+	if ((!is_idle_vm(kvm)) && (!is_host_vm(kvm))) {
+		for (i = 0; i < KVM_MAX_VCPUS; ++i)
+			if (kvm->vcpus[i])
+				kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+	}
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
 			kvm->vcpus[i] = NULL;
 		}
 	}
-
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -4401,16 +4404,20 @@
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-	kvm_iommu_unmap_guest(kvm);
-	kvm_free_pit(kvm);
-	kfree(kvm->arch.vpic);
-	kfree(kvm->arch.vioapic);
-	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
-	if (kvm->arch.apic_access_page)
-		put_page(kvm->arch.apic_access_page);
-	if (kvm->arch.ept_identity_pagetable)
-		put_page(kvm->arch.ept_identity_pagetable);
+	if ((!is_idle_vm(kvm)) && (!is_host_vm(kvm))) {
+		kvm_iommu_unmap_guest(kvm);
+		kvm_free_pit(kvm);
+		kfree(kvm->arch.vpic);
+		kfree(kvm->arch.vioapic);
+		kvm_free_vcpus(kvm);
+		kvm_free_physmem(kvm);
+		if (kvm->arch.apic_access_page)
+			put_page(kvm->arch.apic_access_page);
+		if (kvm->arch.ept_identity_pagetable)
+			put_page(kvm->arch.ept_identity_pagetable);
+	} else
+		kvm_free_vcpus(kvm);
+
 	kfree(kvm);
 }

---
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6997c0..ec69310 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -360,7 +361,6 @@ static void __vcpu_clear(void *arg)
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
 	rdtscll(vmx->vcpu.arch.host_tsc);
-	list_del(&vmx->local_vcpus_link);
 	vmx->vcpu.cpu = -1;
 	vmx->launched = 0;
 }
 
@@ -369,6 +369,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 {
 	if (vmx->vcpu.cpu == -1)
 		return;
+	list_del(&vmx->local_vcpus_link);
 	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
 }
 
@@ -648,6 +649,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 phys_addr = __pa(vmx->vmcs);
 	u64 tsc_this, delta, new_offset;
+	BUG_ON(is_host_vcpu(vcpu) || (is_idle_vcpu(vcpu)));
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
 
@@ -1102,8 +1104,10 @@ static void vmclear_local_vcpus(void)
 	struct vcpu_vmx *vmx, *n;
 
 	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
-				 local_vcpus_link)
+				 local_vcpus_link) {
+		list_del(&vmx->local_vcpus_link);
 		__vcpu_clear(vmx);
+	}
 }
 
@@ -3587,13 +3591,15 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	spin_lock(&vmx_vpid_lock);
-	if (vmx->vpid != 0)
-		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
-	spin_unlock(&vmx_vpid_lock);
-	vmx_free_vmcs(vcpu);
-	kfree(vmx->host_msrs);
-	kfree(vmx->guest_msrs);
+	if ((!is_host_vcpu(vcpu)) && (!is_idle_vcpu(vcpu))) {
+		spin_lock(&vmx_vpid_lock);
+		if (vmx->vpid != 0)
+			__clear_bit(vmx->vpid, vmx_vpid_bitmap);
+		spin_unlock(&vmx_vpid_lock);
+		vmx_free_vmcs(vcpu);
+		kfree(vmx->host_msrs);
+		kfree(vmx->guest_msrs);
+	}
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
@@ -3613,6 +3619,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
+	if (is_idle_vm(kvm) || is_host_vm(kvm))
+		return &vmx->vcpu;
+
 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!vmx->guest_msrs) {
 		err = -ENOMEM;
 
@@ -3739,11 +3748,24 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
-
+#include 
+extern long (*sched_setaffinity_p)(pid_t pid, cpumask_t* in_mask);
+static int setaffinity = -1;
+module_param(setaffinity, int, 0);
 static int __init vmx_init(void)
 {
 	int r;
+	sched_setaffinity_p = (void*)setaffinity;
+	if( setaffinity == -1 ){
+		printk("Please insert this module with parameters\n");
+		printk("to notify me the address of sched_setaffinity\n");
+		printk("Example:\n insmod kvm-intel.ko setaffinity=0xffffffff9002ecd9\n");
+		return -EINVAL;
+	}else{
+		printk("the address of function sched_setaffinity is %p \n", sched_setaffinity_p);
+	}
+
 	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_io_bitmap_a)
 		return -ENOMEM;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0bb4131..3b19058 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -43,6 +43,8 @@
 #include 
 #include 
+#include 
+
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS						\
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -3256,8 +3258,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	down_read(&vcpu->kvm->slots_lock);
 	vapic_enter(vcpu);
-	r = 1;
+	r = 1;
+	if(test_and_clear_bit(_VPF_blocked, &vcpu->pause_flags))
+		vcpu_wake(vcpu);
+
 	while (r > 0) {
+		wait_event(vcpu->wq, (vcpu->status == VCPU_RUNNING));
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			r = vcpu_enter_guest(vcpu, kvm_run);
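
For reviewers who only see this part of the series: the __vcpu_run() hunk above relies on a block/wake
handshake whose real pieces (pause_flags, _VPF_blocked, vcpu->status, vcpu->wq and vcpu_wake()) are
introduced by the scheduler-core patches earlier in this series. Below is a minimal illustrative sketch
of that handshake; the struct, bit index and status value here are assumptions made only so the sketch
is self-contained, not the series' actual definitions.

/* Sketch only -- the real definitions live in the scheduler core patches of this series. */
#include <linux/wait.h>
#include <linux/bitops.h>

#define _VPF_blocked	0		/* assumed bit index: vcpu has blocked itself */
#define VCPU_RUNNING	1		/* assumed status value: vcpu may enter the guest */

struct sched_vcpu {			/* stand-in for the kvm_vcpu fields the hunks touch */
	unsigned long pause_flags;
	int status;
	wait_queue_head_t wq;
};

/*
 * vcpu_wake(): in the real series this re-queues the vcpu with the VM
 * scheduler, which later sets status = VCPU_RUNNING and wakes wq.  The two
 * steps are collapsed here only to keep the sketch self-contained.
 */
static void vcpu_wake(struct sched_vcpu *v)
{
	v->status = VCPU_RUNNING;
	wake_up(&v->wq);		/* releases the wait_event() below */
}

/* Shape of one pass through __vcpu_run() after the x86.c hunk above. */
static void vcpu_run_shape(struct sched_vcpu *v)
{
	/* on entry: if we marked ourselves blocked last time, announce we are back */
	if (test_and_clear_bit(_VPF_blocked, &v->pause_flags))
		vcpu_wake(v);

	/* each loop iteration blocks until the scheduler marks the vcpu runnable */
	wait_event(v->wq, v->status == VCPU_RUNNING);

	/* ... vcpu_enter_guest() runs the guest here ... */

	/* on the exit path: mark the vcpu blocked again before returning to userspace */
	set_bit(_VPF_blocked, &v->pause_flags);
}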