===================================================================
@@ -1636,6 +1636,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t
return gfn;
}
+static int kvm_root_gfn_in_range(struct kvm *kvm, gfn_t base_gfn,
+ gfn_t end_gfn, bool unalias)
+{
+ struct kvm_vcpu *vcpu;
+ gfn_t root_gfn;
+ int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = kvm->vcpus[i];
+ if (!vcpu)
+ continue;
+ root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
+ if (unalias)
+ root_gfn = unalias_gfn(kvm, root_gfn);
+ if (root_gfn >= base_gfn && root_gfn <= end_gfn) {
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/*
* Set a new alias region. Aliases map a portion of physical memory into
* another portion. This is useful for memory windows, for example the PC
@@ -1666,6 +1689,19 @@ static int kvm_vm_ioctl_set_memory_alias
spin_lock(&kvm->mmu_lock);
p = &kvm->arch.aliases[alias->slot];
+
+ /* FIXME: either disallow shrinking alias slots or disable
+ * size changes as done with memslots
+ */
+ if (!alias->memory_size) {
+ r = -EBUSY;
+ if (kvm_root_gfn_in_range(kvm, p->base_gfn,
+ p->base_gfn + p->npages - 1,
+ false))
+ goto out_unlock;
+ }
+
+
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
@@ -1682,6 +1718,9 @@ static int kvm_vm_ioctl_set_memory_alias
return 0;
+out_unlock:
+ spin_unlock(&kvm->mmu_lock);
+ up_write(&kvm->slots_lock);
out:
return r;
}
@@ -4552,6 +4591,15 @@ void kvm_arch_flush_shadow(struct kvm *k
kvm_mmu_zap_all(kvm);
}
+int kvm_arch_can_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ int ret;
+
+ ret = kvm_root_gfn_in_range(kvm, slot->base_gfn,
+ slot->base_gfn + slot->npages - 1, true);
+ return !ret;
+}
+
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
===================================================================
@@ -200,6 +200,7 @@ int kvm_arch_set_memory_region(struct kv
struct kvm_memory_slot old,
int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
+int kvm_arch_can_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
===================================================================
@@ -1061,6 +1061,13 @@ static int kvm_vm_release(struct inode *
return 0;
}
+#ifndef __KVM_HAVE_ARCH_CAN_FREE_MEMSLOT
+int kvm_arch_can_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ return 1;
+}
+#endif
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -1179,8 +1186,13 @@ int __kvm_set_memory_region(struct kvm *
}
#endif /* not defined CONFIG_S390 */
- if (!npages)
+ if (!npages) {
kvm_arch_flush_shadow(kvm);
+ if (!kvm_arch_can_free_memslot(kvm, memslot)) {
+ r = -EBUSY;
+ goto out_free;
+ }
+ }
spin_lock(&kvm->mmu_lock);
if (mem->slot >= kvm->nmemslots)
===================================================================
@@ -18,6 +18,8 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
+#define __KVM_HAVE_ARCH_CAN_FREE_MEMSLOT
+
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
Disallow the deletion of memory slots (and aliases, in the x86 case) if a vcpu contains a cr3 that points to such a slot/alias. This complements commit 6c20e1442bb1c62914bb85b7f4a38973d2a423ba. v2: - set KVM_REQ_TRIPLE_FAULT - use __KVM_HAVE_ARCH_CAN_FREE_MEMSLOT to avoid duplication of the stub Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html