@@ -32,6 +32,7 @@
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
+#include <linux/sched-if.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
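The hunks below rely on a scheduler interface that is not in mainline KVM; it comes from the companion scheduler patch via <linux/sched-if.h>. As a reading aid, here is a hypothetical reconstruction of the declarations this file uses, inferred purely from the call sites in this patch (names, types, and semantics are assumptions, not the real header):

/* Hypothetical sketch of <linux/sched-if.h>; the real header ships with
 * the scheduler patchset this diff builds on. */
#ifndef _LINUX_SCHED_IF_H
#define _LINUX_SCHED_IF_H

#define _VPF_blocked	0	/* bit in vcpu->pause_flags (Xen-style naming) */
#define VCPU_RUNNING	1	/* vcpu->status value __vcpu_run() waits for */

struct kvm;
struct kvm_vcpu;

int is_host_vcpu(struct kvm_vcpu *vcpu);   /* vcpu belonging to the host VM? */
int is_idle_vcpu(struct kvm_vcpu *vcpu);   /* vcpu belonging to the idle VM? */
int is_host_vm(struct kvm *kvm);
int is_idle_vm(struct kvm *kvm);
void vcpu_wake(struct kvm_vcpu *vcpu);     /* hand a vcpu back to the scheduler */

#endif /* _LINUX_SCHED_IF_H */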
@@ -360,7 +361,6 @@ static void __vcpu_clear(void *arg)
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
rdtscll(vmx->vcpu.arch.host_tsc);
- list_del(&vmx->local_vcpus_link);
vmx->vcpu.cpu = -1;
vmx->launched = 0;
}
@@ -369,6 +369,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
{
if (vmx->vcpu.cpu == -1)
return;
+ list_del(&vmx->local_vcpus_link);
smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}
@@ -648,6 +649,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(vmx->vmcs);
u64 tsc_this, delta, new_offset;
+ BUG_ON(is_host_vcpu(vcpu) || is_idle_vcpu(vcpu));
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
@@ -1102,8 +1104,10 @@ static void vmclear_local_vcpus(void)
struct vcpu_vmx *vmx, *n;
list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
- local_vcpus_link)
+ local_vcpus_link) {
+ list_del(&vmx->local_vcpus_link);
__vcpu_clear(vmx);
+ }
}
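Read together with the vcpu_clear()/__vcpu_clear() hunks above, this moves the unlink of local_vcpus_link out of the IPI handler: vcpu_clear() now removes the vcpu from the per-cpu vcpus_on_cpu list on the calling CPU before sending the IPI, and vmclear_local_vcpus(), which calls __vcpu_clear() directly rather than via IPI, unlinks each entry itself inside the loop (hence the retained list_for_each_entry_safe iterator).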
@@ -3587,13 +3591,15 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- spin_lock(&vmx_vpid_lock);
- if (vmx->vpid != 0)
- __clear_bit(vmx->vpid, vmx_vpid_bitmap);
- spin_unlock(&vmx_vpid_lock);
- vmx_free_vmcs(vcpu);
- kfree(vmx->host_msrs);
- kfree(vmx->guest_msrs);
+ if (!is_host_vcpu(vcpu) && !is_idle_vcpu(vcpu)) {
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
+ vmx_free_vmcs(vcpu);
+ kfree(vmx->host_msrs);
+ kfree(vmx->guest_msrs);
+ }
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
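vmx_free_vcpu() now skips the vpid, VMCS, and MSR-array teardown for host and idle vcpus; as the vmx_create_vcpu() hunk below shows, creation returns early for those VMs, before any of these resources are allocated, so there is nothing to free.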
@@ -3613,6 +3619,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ if (is_idle_vm(kvm) || is_host_vm(kvm))
+ return &vmx->vcpu;
+
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!vmx->guest_msrs) {
err = -ENOMEM;
@@ -3739,11 +3748,24 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_tdp_level = get_ept_level,
.get_mt_mask_shift = vmx_get_mt_mask_shift,
};
-
+#include <linux/moduleparam.h>
+
+extern long (*sched_setaffinity_p)(pid_t pid, cpumask_t *in_mask);
+static unsigned long setaffinity;
+module_param(setaffinity, ulong, 0);
static int __init vmx_init(void)
{
int r;
+ sched_setaffinity_p = (long (*)(pid_t, cpumask_t *))setaffinity;
+ if (!setaffinity) {
+ printk(KERN_ERR "kvm-intel: the setaffinity parameter must carry the kernel address of sched_setaffinity\n");
+ printk(KERN_ERR "kvm-intel: example: insmod kvm-intel.ko setaffinity=0xffffffff9002ecd9\n");
+ return -EINVAL;
+ } else {
+ printk(KERN_INFO "kvm-intel: sched_setaffinity is at %p\n", sched_setaffinity_p);
+ }
+
vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_io_bitmap_a)
return -ENOMEM;
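sched_setaffinity() is not exported to modules here, so the patch smuggles its address in through the setaffinity module parameter and stores it in the sched_setaffinity_p function pointer declared extern above (presumably defined by the companion scheduler patch). The address can be read from /proc/kallsyms ("grep ' T sched_setaffinity' /proc/kallsyms") and passed at load time, e.g. "insmod kvm-intel.ko setaffinity=0xffffffff9002ecd9". As an illustration of how the pointer might then be used, here is a hypothetical helper for pinning a vcpu thread to one physical CPU; pin_vcpu_to_cpu is not part of the patch, and the 2.6-era cpumask macros are assumed to match the cpumask_t * signature above:

/* Illustrative only: pin task 'pid' to 'cpu' via the smuggled pointer. */
static int pin_vcpu_to_cpu(pid_t pid, int cpu)
{
	cpumask_t mask;

	if (!sched_setaffinity_p)
		return -ENOSYS;		/* module loaded without the address */
	cpus_clear(mask);
	cpu_set(cpu, mask);
	return sched_setaffinity_p(pid, &mask);
}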
@@ -43,6 +43,8 @@
#include <asm/desc.h>
#include <asm/mtrr.h>
+#include <linux/sched-if.h>
+
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -3256,8 +3258,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
down_read(&vcpu->kvm->slots_lock);
vapic_enter(vcpu);
- r = 1;
+ r = 1;
+ if (test_and_clear_bit(_VPF_blocked, &vcpu->pause_flags))
+ vcpu_wake(vcpu);
+
while (r > 0) {
+ wait_event(vcpu->wq, vcpu->status == VCPU_RUNNING);
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu, kvm_run);
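With this change the run loop parks each vcpu on its own waitqueue until the external scheduler marks it VCPU_RUNNING, and a vcpu that blocked (its _VPF_blocked pause flag set) is handed back to the scheduler via vcpu_wake() when userspace re-enters KVM_RUN. The dispatch side lives in the scheduler patch; a hypothetical sketch of the minimum it must do to release the wait_event() above (names inferred from this hunk, not confirmed):

/* Hypothetical scheduler-side dispatch, not part of this patch: when the
 * external scheduler selects a vcpu to run, it must mark it VCPU_RUNNING
 * and kick the thread sleeping in __vcpu_run()'s wait_event(). */
static void vcpu_dispatch(struct kvm_vcpu *vcpu)
{
	vcpu->status = VCPU_RUNNING;
	wake_up(&vcpu->wq);
}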