@@ -35,10 +35,13 @@
#define stage2_pud_huge(pud) pud_huge(pud)

+#define S2_PGDIR_SIZE PGDIR_SIZE
+#define S2_PGDIR_MASK PGDIR_MASK
+
/* Open coded p*d_addr_end that can deal with 64bit addresses */
static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
{
- phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
+ phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;

return (boundary - 1 < end - 1) ? boundary : end;
}
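
As background on the open-coded form above: a minimal userspace sketch, not part of the patch, of why comparing boundary - 1 against end - 1 stays correct when addr + S2_PGDIR_SIZE wraps past the top of the address space. The demo_* names, the 1GB entry size, and uint64_t standing in for phys_addr_t are all assumptions for illustration.

#include <assert.h>
#include <stdint.h>

#define DEMO_PGDIR_SIZE	(1ULL << 30)		/* assumed: 1GB per pgd entry */
#define DEMO_PGDIR_MASK	(~(DEMO_PGDIR_SIZE - 1))

static uint64_t demo_pgd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + DEMO_PGDIR_SIZE) & DEMO_PGDIR_MASK;

	/*
	 * If the addition wrapped, boundary is 0 and boundary - 1 is
	 * UINT64_MAX, so the comparison clamps to 'end' instead of
	 * handing the walker an address behind it.
	 */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Ordinary case: stop at the next 1GB boundary. */
	assert(demo_pgd_addr_end(0x40000000ULL, 0xc0000000ULL) == 0x80000000ULL);

	/*
	 * addr in the topmost entry: a naive 'boundary < end' would
	 * return the wrapped value 0 and the walk would never terminate.
	 */
	assert(demo_pgd_addr_end(0xffffffffc0000000ULL,
				 0xffffffffffffffffULL) == 0xffffffffffffffffULL);
	return 0;
}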
@@ -41,7 +41,7 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define S2_PGD_TABLE_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
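
The rename is worth spelling out, because two unrelated sizes now carry a "PGD" prefix: S2_PGDIR_SIZE is the span of guest IPA covered by one first-level entry, while S2_PGD_TABLE_SIZE is the footprint of the table itself. A rough sketch with made-up numbers for a hypothetical 40-bit IPA, 4K-page configuration (none of these values come from the patch):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PTRS_PER_S2_PGD	1024		/* assumed: 2^(40 - 30) entries */
#define DEMO_S2_PGDIR_SIZE	(1ULL << 30)	/* assumed: IPA per entry */

int main(void)
{
	/* What the range walker steps by. */
	printf("one pgd entry maps %llu bytes of IPA\n",
	       (unsigned long long)DEMO_S2_PGDIR_SIZE);

	/*
	 * What alloc_pages_exact()/free_pages_exact() are sized with:
	 * PTRS_PER_S2_PGD * sizeof(pgd_t), taking sizeof(pgd_t) as 8
	 * bytes here.
	 */
	printf("the pgd table itself occupies %zu bytes\n",
	       DEMO_PTRS_PER_S2_PGD * sizeof(uint64_t));
	return 0;
}

With these stand-in values the two quantities differ by five orders of magnitude (1GB versus 8KB), which is exactly the confusion the old S2_PGD_SIZE name invited.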
@@ -299,7 +299,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
- if (size > S2_PUD_SIZE)
+ if (size > S2_PGDIR_SIZE)
cond_resched_lock(&kvm->mmu_lock);
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
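
For the loop this guard sits in, a paraphrase of the surrounding walker, reconstructed from the hunk context rather than quoted from the patched file: cond_resched_lock() releases kvm->mmu_lock, reschedules if one is due, and retakes the lock, so only unmaps spanning more than one first-level entry ever pay that cost.

/* Paraphrased walker; assumes the usual stage-2 unmap loop shape. */
static void demo_unmap_walk(struct kvm *kvm, phys_addr_t start, u64 size)
{
	phys_addr_t addr = start, end = start + size, next;
	pgd_t *pgd = kvm->arch.pgd + stage2_pgd_index(addr);

	do {
		/*
		 * A range within a single pgd entry (the common case)
		 * never releases and retakes the lock.
		 */
		if (size > S2_PGDIR_SIZE)
			cond_resched_lock(&kvm->mmu_lock);
		next = stage2_pgd_addr_end(addr, end);
		if (!stage2_pgd_none(*pgd))
			unmap_stage2_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}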
@@ -747,7 +747,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
}

/* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+ pgd = alloc_pages_exact(S2_PGD_TABLE_SIZE, GFP_KERNEL | __GFP_ZERO);
if (!pgd)
return -ENOMEM;
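
On the allocator choice the comment above depends on (background, not part of this change): alloc_pages_exact() grabs a high-order block, splits it, and frees the unused tail, so every page in the returned region carries its own refcount, whereas a plain __get_free_pages() allocation is refcounted only through its head page. A sketch of the alloc side, with the demo_* name invented:

/* Illustrative only. */
static void *demo_alloc_s2_pgd(void)
{
	/* Exactly S2_PGD_TABLE_SIZE bytes, zeroed, each page refcounted. */
	return alloc_pages_exact(S2_PGD_TABLE_SIZE, GFP_KERNEL | __GFP_ZERO);
}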
@@ -843,7 +843,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
spin_unlock(&kvm->mmu_lock);

/* Free the HW pgd, one page at a time */
- free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+ free_pages_exact(kvm->arch.pgd, S2_PGD_TABLE_SIZE);
kvm->arch.pgd = NULL;
}
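
The free side must mirror that exactly: free_pages_exact() takes the same size the allocation was made with and releases the region page by page, which is what "one page at a time" above refers to. A matching sketch, demo_* name again invented:

/* Illustrative only; size must match the alloc_pages_exact() call. */
static void demo_free_s2_pgd(struct kvm *kvm)
{
	free_pages_exact(kvm->arch.pgd, S2_PGD_TABLE_SIZE);
	kvm->arch.pgd = NULL;
}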