===================================================================
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1393,6 +1393,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_tpr_shadow())
 		kvm_x86_ops->update_cr8_intercept = NULL;
 
+	if (enable_ept && !cpu_has_vmx_ept_2m_page())
+		kvm_disable_largepages();
+
 	return alloc_kvm_area();
 }
 
===================================================================
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -219,6 +219,7 @@ int kvm_arch_set_memory_region(struct kv
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
===================================================================
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *
 
 static bool kvm_rebooting;
 
+static bool largepages_disabled;
+
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
@@ -1171,9 +1173,11 @@ int __kvm_set_memory_region(struct kvm *
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
-		 * other, disable large page support for this slot
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
 		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
+		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+		    largepages_disabled)
			for (i = 0; i < largepages; ++i)
				new.lpage_info[i].write_count = 1;
 	}
@@ -1286,6 +1290,12 @@ out:
 	return r;
 }
 
+void kvm_disable_largepages(void)
+{
+	largepages_disabled = true;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_largepages);
+
 int is_error_page(struct page *page)
 {
 	return page == bad_page;
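
A note on the alignment test extended in the __kvm_set_memory_region hunk
above (background, not part of the patch): a slot can only be backed by
large pages when the guest frame number and the userspace frame number
share the same offset within a huge page, which is what XORing base_gfn
with ugfn and masking against KVM_PAGES_PER_HPAGE - 1 detects. The
standalone sketch below illustrates the predicate; PAGES_PER_HPAGE and
hpage_aligned() are illustrative names, and 512 pages per huge page
assumes the 4 KiB base / 2 MiB huge page geometry this patch targets.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_HPAGE	512ULL	/* 2 MiB / 4 KiB */

/* True when guest and host mappings line up on huge page boundaries. */
static bool hpage_aligned(uint64_t base_gfn, uint64_t ugfn)
{
	return ((base_gfn ^ ugfn) & (PAGES_PER_HPAGE - 1)) == 0;
}

int main(void)
{
	printf("%d\n", hpage_aligned(0x200, 0x400));	/* 1: same offset */
	printf("%d\n", hpage_aligned(0x200, 0x401));	/* 0: off by one page */
	return 0;
}

When the predicate fails, or now also when largepages_disabled is set,
every lpage_info entry for the slot gets write_count = 1, which the page
fault path treats as "do not map this range with a large page".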
Disable usage of 2M pages if VMX_EPT_2MB_PAGE_BIT (bit 16) is clear
in MSR_IA32_VMX_EPT_VPID_CAP and EPT is enabled.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
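
For reference, cpu_has_vmx_ept_2m_page() itself is not part of this
diff. Below is a minimal sketch of what the check might look like,
assuming the EPT/VPID capability words are read from
MSR_IA32_VMX_EPT_VPID_CAP during setup_vmcs_config(), as vmx.c does for
the other capability helpers; read_ept_vpid_caps() is an illustrative
stand-in for that step.

/* Bit 16 of MSR_IA32_VMX_EPT_VPID_CAP: EPT can map 2 MiB pages. */
#define VMX_EPT_2MB_PAGE_BIT	0x00010000

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

/* Illustrative stand-in: assumed to run from setup_vmcs_config(),
 * before hardware_setup() evaluates cpu_has_vmx_ept_2m_page(). */
static void read_ept_vpid_caps(void)
{
	rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
	      vmx_capability.ept, vmx_capability.vpid);
}

static inline int cpu_has_vmx_ept_2m_page(void)
{
	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}

Because hardware_setup() runs once at module load, the
kvm_disable_largepages() call flips the global flag before any memory
slot exists, so every slot later registered through
__kvm_set_memory_region() has its lpage_info write counts set and the
MMU never installs 2M mappings on hardware lacking the capability.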