diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3344,7 +3344,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
- PageTransCompoundMap(pfn_to_page(pfn))) {
+ kvm_is_transparent_hugepage(pfn)) {
unsigned long mask;
/*
@@ -5959,7 +5959,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
*/
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn) &&
- PageTransCompoundMap(pfn_to_page(pfn))) {
+ kvm_is_transparent_hugepage(pfn)) {
pte_list_remove(rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,6 +160,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -310,6 +311,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
static inline void prep_transhuge_page(struct page *page) {}
+static inline bool is_transparent_hugepage(struct page *page)
+{
+ return false;
+}
+
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -985,6 +985,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -527,6 +527,17 @@ void prep_transhuge_page(struct page *page)
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
+bool is_transparent_hugepage(struct page *page)
+{
+ if (!PageCompound(page))
+ return false;
+
+ page = compound_head(page);
+ return is_huge_zero_page(page) ||
+ page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
+}
+EXPORT_SYMBOL_GPL(is_transparent_hugepage);
+
static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
loff_t off, unsigned long flags, unsigned long size)
{
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1372,14 +1372,8 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
kvm_pfn_t pfn = *pfnp;
gfn_t gfn = *ipap >> PAGE_SHIFT;
- struct page *page = pfn_to_page(pfn);
- /*
- * PageTransCompoundMap() returns true for THP and
- * hugetlbfs. Make sure the adjustment is done only for THP
- * pages.
- */
- if (!PageHuge(page) && PageTransCompoundMap(page)) {
+ if (kvm_is_transparent_hugepage(pfn)) {
unsigned long mask;
/*
* The address we faulted on is backed by a transparent huge
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -191,6 +191,16 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
return true;
}
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+
+ if (!PageTransCompoundMap(page))
+ return false;
+
+ return is_transparent_hugepage(compound_head(page));
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
Add a helper, is_transparent_hugepage(), to explicitly check whether a
compound page is a THP and use it when populating KVM's secondary MMU.
The explicit check fixes a bug where a remapped compound page, e.g. for
an XDP Rx socket, is mapped into a KVM guest and is mistaken for a THP,
which results in KVM incorrectly creating a huge page in its secondary
MMU.

Fixes: 936a5fe6e6148 ("thp: kvm mmu transparent hugepage support")
Reported-by: syzbot+c9d1fb51ac9d0d10c39d@syzkaller.appspotmail.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c   |  4 ++--
 include/linux/huge_mm.h  |  6 ++++++
 include/linux/kvm_host.h |  1 +
 mm/huge_memory.c         | 11 +++++++++++
 virt/kvm/arm/mmu.c       |  8 +-------
 virt/kvm/kvm_main.c      | 10 ++++++++++
 6 files changed, 31 insertions(+), 9 deletions(-)
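
For reference, an annotated restatement of the new check; the code is
identical to the mm/huge_memory.c hunk above, and the comments are
explanatory notes only, not part of the patch:

    bool is_transparent_hugepage(struct page *page)
    {
            /* An order-0 page cannot be a THP. */
            if (!PageCompound(page))
                    return false;

            /* The compound destructor is stored in the first tail page. */
            page = compound_head(page);

            /*
             * prep_transhuge_page() sets TRANSHUGE_PAGE_DTOR on every THP,
             * so only true THPs (and the huge zero page, which is never
             * prepped) pass.  A compound page remapped by a driver, e.g.
             * for an XDP Rx socket, keeps the default compound destructor
             * and is rejected here, unlike with a bare
             * PageTransCompoundMap() test.
             */
            return is_huge_zero_page(page) ||
                   page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
    }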