--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -57,7 +57,6 @@ static inline void cr4_clear_bits(unsigned long mask)
local_irq_restore(flags);
}
-#ifndef MODULE
/*
* 6 because 6 should be plenty and struct tlb_state will fit in two cache
* lines.
@@ -417,7 +416,6 @@ static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
-#endif /* !MODULE */
static inline void __native_tlb_flush_global(unsigned long cr4)
{
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1036,6 +1036,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
put_cpu();
mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
+EXPORT_SYMBOL_GPL(flush_tlb_mm_range);
static void do_flush_tlb_all(void *info)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -269,6 +269,7 @@ void filemap_remove_folio(struct folio *folio)
filemap_free_folio(mapping, folio);
}
+EXPORT_SYMBOL_GPL(filemap_remove_folio);
/*
* page_cache_delete_batch - delete several folios from page cache
@@ -955,6 +956,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
+EXPORT_SYMBOL_GPL(__filemap_add_folio);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -448,6 +448,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
pte_free(mm, new);
return 0;
}
+EXPORT_SYMBOL_GPL(__pte_alloc);
int __pte_alloc_kernel(pmd_t *pmd)
{
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1780,6 +1780,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
info.high_limit = mmap_end;
return vm_unmapped_area(&info);
}
+EXPORT_SYMBOL_GPL(generic_get_unmapped_area);
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
@@ -1844,6 +1845,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
return addr;
}
+EXPORT_SYMBOL_GPL(generic_get_unmapped_area_topdown);
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -304,6 +304,7 @@ pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
rcu_read_unlock();
return NULL;
}
+EXPORT_SYMBOL_GPL(__pte_offset_map);
pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1468,6 +1468,7 @@ void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
{
__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
+EXPORT_SYMBOL_GPL(folio_add_file_rmap_ptes);
/**
* folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
@@ -1594,6 +1595,7 @@ void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
{
__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
+EXPORT_SYMBOL_GPL(folio_remove_rmap_ptes);
/**
* folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
This patch exports memory management functions that are useful to memory
managers, so that memory management filesystems can be implemented as
kernel modules. The "#ifndef MODULE" guards are dropped from
arch/x86/include/asm/tlbflush.h so that the flush_tlb_mm_range()
declaration is visible to module code.

Signed-off-by: Bijan Tabatabai <btabatabai@wisc.edu>
---
 arch/x86/include/asm/tlbflush.h | 2 --
 arch/x86/mm/tlb.c               | 1 +
 mm/filemap.c                    | 2 ++
 mm/memory.c                     | 1 +
 mm/mmap.c                       | 2 ++
 mm/pgtable-generic.c            | 1 +
 mm/rmap.c                       | 2 ++
 7 files changed, 9 insertions(+), 2 deletions(-)
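As a usage illustration (not part of the patch), below is a minimal sketch
of how a module-based memory manager might call two of the newly exported
symbols. The example_* helpers are hypothetical, the folio and mm are
assumed to come from the module's own filesystem, and error handling is
elided:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer of the exports above; not part of this patch. */
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Drop a folio that the module previously inserted into an inode's
 * page cache. filemap_remove_folio() expects the folio to be locked;
 * it only becomes callable from a module with this patch applied.
 */
static void __maybe_unused example_drop_folio(struct folio *folio)
{
	folio_lock(folio);
	filemap_remove_folio(folio);
	folio_unlock(folio);
	folio_put(folio);	/* drop the module's own reference */
}

/*
 * Flush stale TLB entries after the module has cleared PTEs in
 * [start, end) of mm; flush_tlb_mm_range() is likewise usable here
 * only because of the new EXPORT_SYMBOL_GPL().
 */
static void __maybe_unused example_flush_range(struct mm_struct *mm,
					       unsigned long start,
					       unsigned long end)
{
	flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, false);
}

/* The exports are GPL-only, so a consuming module must be GPL too. */
MODULE_LICENSE("GPL");

Note that the caller is assumed to hold its own reference on the folio in
addition to the page cache's reference, which filemap_remove_folio() drops.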