
[kvm-unit-tests,v2,5/6] x86: vmx: split large EPTEs in install_ept_entry

Message ID 1456871694-23042-6-git-send-email-pfeiner@google.com (mailing list archive)

Commit Message

Peter Feiner March 1, 2016, 10:34 p.m. UTC
When install_ept_entry encountered a leaf entry above pte_level, it
just cleared the EPT_LARGE_PAGE bit and continued the traversal _using
the first 4K page from the large page as the next level of the page
table_! This is broken because (1) the data in the large page would be
overwritten, and (2) all of the other entries in the new level of the page
table would contain garbage.
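
To make the failure mode concrete, here is a hypothetical toy model in
plain C (not kvm-unit-tests code) of what happens when a data page is
reused as a page-table page:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PT_ENTRIES 512

	int main(void)
	{
		/* Stand-in for the first 4K of the large page's guest data. */
		uint64_t large_page[PT_ENTRIES];
		memset(large_page, 0xab, sizeof(large_page));

		/* The old code cleared EPT_LARGE_PAGE and kept walking, so
		 * the data page itself became the "next level" of the table. */
		uint64_t *bogus_pt = large_page;

		/* (1) Installing an entry clobbers the guest's data... */
		bogus_pt[0] = 0x1000 | 0x7;

		/* (2) ...and every other "entry" is whatever data was there. */
		printf("bogus_pt[1] = %#llx\n", (unsigned long long)bogus_pt[1]);
		return 0;
	}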

Now, install_ept_entry splits the large mapping by allocating a new
page and filling it in with 512 PTEs that point to the large page's
constituent 2M or 4K pages.
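
As a standalone sanity check of the stride arithmetic (a sketch, not part
of the patch; the macros are copied from the x86/vmx.h hunk below):

	#include <assert.h>
	#include <stdio.h>

	/* Constants as defined in x86/vmx.h (see the hunk below). */
	#define EPT_PGDIR_WIDTH		9
	#define EPT_PGDIR_ENTRIES	(1 << EPT_PGDIR_WIDTH)
	#define EPT_LEVEL_SHIFT(level)	(((level) - 1) * EPT_PGDIR_WIDTH + 12)

	int main(void)
	{
		/* A level-2 (2M) mapping splits into 512 4K children; a
		 * level-3 (1G) mapping splits into 512 2M children. */
		unsigned long stride2 = 1ul << EPT_LEVEL_SHIFT(1);	/* 4K */
		unsigned long stride3 = 1ul << EPT_LEVEL_SHIFT(2);	/* 2M */

		assert(stride2 == 4096ul);
		assert(stride3 == (2ul << 20));
		/* The children exactly tile the parent mapping. */
		assert(stride2 * EPT_PGDIR_ENTRIES == stride3);
		assert(stride3 * EPT_PGDIR_ENTRIES == (1ul << 30));

		printf("level 2 split: %d children of %lu bytes\n",
		       EPT_PGDIR_ENTRIES, stride2);
		printf("level 3 split: %d children of %lu bytes\n",
		       EPT_PGDIR_ENTRIES, stride3);
		return 0;
	}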

This path is exercised in the VMX EPT test when 2M EPT pages are
enabled. The bug wasn't obvious because the free list is sorted in
descending order of HPA, so the large page that got overwritten with an
EPTE happened to contain only 0s.

Fixes: 04b0e0f342978f08b8b0b068c08c9d45ee80e3f7 ("nEPT: Fix test cases for 2M huge pages").
Signed-off-by: Peter Feiner <pfeiner@google.com>
---
 x86/vmx.c | 42 ++++++++++++++++++++++++++++++++++++++++--
 x86/vmx.h |  1 +
 2 files changed, 41 insertions(+), 2 deletions(-)

Patch

diff --git a/x86/vmx.c b/x86/vmx.c
index 140ad86..d3fdc71 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -215,6 +215,44 @@  asm(
 );
 
 /* EPT paging structure related functions */
+/* split_large_ept_entry: Split a 2M/1G large page into 512 smaller PTEs.
+		@ptep : large page table entry to split
+		@level : level of ptep (2 or 3)
+ */
+static void split_large_ept_entry(unsigned long *ptep, int level)
+{
+	unsigned long *new_pt;
+	unsigned long gpa;
+	unsigned long pte;
+	unsigned long prototype;
+	int i;
+
+	pte = *ptep;
+	assert(pte & EPT_PRESENT);
+	assert(pte & EPT_LARGE_PAGE);
+	assert(level == 2 || level == 3);
+
+	new_pt = alloc_page();
+	assert(new_pt);
+	memset(new_pt, 0, PAGE_SIZE);
+
+	prototype = pte & ~EPT_ADDR_MASK;
+	if (level == 2)
+		prototype &= ~EPT_LARGE_PAGE;
+
+	gpa = pte & EPT_ADDR_MASK;
+	for (i = 0; i < EPT_PGDIR_ENTRIES; i++) {
+		new_pt[i] = prototype | gpa;
+		gpa += 1ul << EPT_LEVEL_SHIFT(level - 1);
+	}
+
+	pte &= ~EPT_LARGE_PAGE;
+	pte &= ~EPT_ADDR_MASK;
+	pte |= virt_to_phys(new_pt);
+
+	*ptep = pte;
+}
+
 /* install_ept_entry : Install a page to a given level in EPT
 		@pml4 : addr of pml4 table
 		@pte_level : level of PTE to set
@@ -244,8 +282,8 @@  void install_ept_entry(unsigned long *pml4,
 			memset(new_pt, 0, PAGE_SIZE);
 			pt[offset] = virt_to_phys(new_pt)
 					| EPT_RA | EPT_WA | EPT_EA;
-		} else
-			pt[offset] &= ~EPT_LARGE_PAGE;
+		} else if (pt[offset] & EPT_LARGE_PAGE)
+			split_large_ept_entry(&pt[offset], level);
 		pt = phys_to_virt(pt[offset] & EPT_ADDR_MASK);
 	}
 	offset = (guest_addr >> EPT_LEVEL_SHIFT(level)) & EPT_PGDIR_MASK;
diff --git a/x86/vmx.h b/x86/vmx.h
index 018051a..35b5431 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -467,6 +467,7 @@  enum Ctrl1 {
 #define EPT_PAGE_LEVEL		4
 #define EPT_PGDIR_WIDTH		9
 #define EPT_PGDIR_MASK		511
+#define EPT_PGDIR_ENTRIES	(1 << EPT_PGDIR_WIDTH)
 #define EPT_LEVEL_SHIFT(level)	(((level)-1) * EPT_PGDIR_WIDTH + 12)
 #define EPT_ADDR_MASK		GENMASK(52, 11)
 #define PAGE_MASK		(~(PAGE_SIZE-1))