@@ -176,7 +176,6 @@ extern struct list_head huge_boot_pages[MAX_NUMNODES];

/* arch callbacks */

-#ifndef CONFIG_HIGHPTE
/*
* pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
* which may go down to the lowest PTE level in their huge_pte_offset() and
@@ -191,7 +190,6 @@ static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
{
return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
-#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
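
For context, here is a minimal sketch of how an architecture's huge_pte_alloc()
might use the pte_alloc_huge() helper for its lowest-level huge page size,
loosely modeled on arm64's contiguous-PTE handling. The function shape below is
illustrative, not part of this patch:

        pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long addr, unsigned long sz)
        {
                pgd_t *pgd = pgd_offset(mm, addr);
                p4d_t *p4d = p4d_alloc(mm, pgd, addr);
                pud_t *pud;
                pmd_t *pmd;

                if (!p4d)
                        return NULL;
                pud = pud_alloc(mm, p4d, addr);
                if (!pud)
                        return NULL;
                if (sz == PUD_SIZE)
                        return (pte_t *)pud;
                pmd = pmd_alloc(mm, pud, addr);
                if (!pmd)
                        return NULL;
                if (sz == PMD_SIZE)
                        return (pte_t *)pmd;
                /* Lowest level: fine now that PTE tables are never in highmem. */
                return pte_alloc_huge(mm, pmd, addr);
        }
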
@@ -966,9 +964,8 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
*/
if (size >= PUD_SIZE)
return pud_lockptr(mm, (pud_t *) pte);
- else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
+ else if (size >= PMD_SIZE)
return pmd_lockptr(mm, (pmd_t *) pte);
- /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
return ptep_lockptr(mm, pte);
}
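
The effect is easiest to see from the caller side. A minimal usage sketch, with
h, mm and ptep assumed to be in scope (the existing huge_pte_lock() helper in
this header wraps exactly this pattern):

        spinlock_t *ptl;

        /*
         * Pick the lock matching the mapping level: the PUD lock for
         * sizes >= PUD_SIZE, the PMD lock for sizes >= PMD_SIZE, and
         * (now that highmem PTE tables are gone) the split PTE lock
         * otherwise.
         */
        ptl = huge_pte_lockptr(h, mm, ptep);
        spin_lock(ptl);
        /* ... walk or update the hugetlb entry ... */
        spin_unlock(ptl);
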
@@ -2954,7 +2954,6 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)

static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
- BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
return ptlock_ptr(virt_to_ptdesc(pte));
}
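
The dropped BUILD_BUG_ON() documented that this helper is unsafe when PTE
tables can live in highmem, where virt_to_ptdesc() on a pte_t * is meaningless.
A sketch of the invariant the function now relies on, assuming page tables are
always allocated from lowmem once CONFIG_HIGHPTE is gone:

        /*
         * Without CONFIG_HIGHPTE every PTE table lives in the kernel's
         * direct map, so a pte_t * is always a lowmem virtual address
         * and can be translated back to its page table descriptor:
         */
        struct ptdesc *ptdesc = virt_to_ptdesc(pte);    /* always valid now */
        spinlock_t *ptl = ptlock_ptr(ptdesc);
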
@@ -119,14 +119,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#define pte_offset_kernel pte_offset_kernel
#endif

-#ifdef CONFIG_HIGHPTE
-#define __pte_map(pmd, address) \
- ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
-#define pte_unmap(pte) do { \
- kunmap_local((pte)); \
- rcu_read_unlock(); \
-} while (0)
-#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
return pte_offset_kernel(pmd, address);
@@ -135,7 +127,6 @@ static inline void pte_unmap(pte_t *pte)
{
rcu_read_unlock();
}
-#endif

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
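
For reference, the surviving pte_unmap() still drops the RCU read lock because
its counterpart takes it: __pte_offset_map() calls rcu_read_lock() before
mapping the PTE table. A simplified caller sketch, with pmd and addr assumed to
be in scope:

        pte_t *pte = pte_offset_map(pmd, addr);

        if (!pte)
                return 0;       /* the PTE table was freed under us */
        /* ... read the entry, or take the PTE lock before modifying ... */
        pte_unmap(pte);         /* pairs with the rcu_read_lock() from mapping */
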