@@ -139,14 +139,20 @@ static inline int clear_mce_nospec(unsigned long pfn)
*/
#endif
+typedef int (*gpc_callback)(struct page *page, unsigned int cnt);
+
struct grouped_page_cache {
struct shrinker shrinker;
struct list_lru lru;
gfp_t gfp;
+ gpc_callback pre_add_to_cache;
+ gpc_callback pre_shrink_free;
atomic_t nid_round_robin;
};
-int init_grouped_page_cache(struct grouped_page_cache *gpc, gfp_t gfp);
+int init_grouped_page_cache(struct grouped_page_cache *gpc, gfp_t gfp,
+ gpc_callback pre_add_to_cache,
+ gpc_callback pre_shrink_free);
struct page *get_grouped_page(int node, struct grouped_page_cache *gpc);
void free_grouped_page(struct grouped_page_cache *gpc, struct page *page);
@@ -2356,6 +2356,9 @@ static void __dispose_pages(struct grouped_page_cache *gpc, struct list_head *he
list_del(cur);
+ if (gpc->pre_shrink_free)
+ gpc->pre_shrink_free(page, 1);
+
__free_pages(page, 0);
}
}
@@ -2413,18 +2416,33 @@ static struct page *__replenish_grouped_pages(struct grouped_page_cache *gpc, in
int i;
page = __alloc_page_order(node, gpc->gfp, HUGETLB_PAGE_ORDER);
- if (!page)
- return __alloc_page_order(node, gpc->gfp, 0);
+	if (!page) {
+		page = __alloc_page_order(node, gpc->gfp, 0);
+		if (!page)
+			return NULL;
+		if (gpc->pre_add_to_cache && gpc->pre_add_to_cache(page, 1)) {
+			if (gpc->pre_shrink_free)
+				gpc->pre_shrink_free(page, 1);
+			__free_pages(page, 0);
+			return NULL;
+		}
+		return page;
+	}
split_page(page, HUGETLB_PAGE_ORDER);
+	/* If the prepare callback fails, clean up and free the pages */
+ if (gpc->pre_add_to_cache && gpc->pre_add_to_cache(page, hpage_cnt)) {
+ if (gpc->pre_shrink_free)
+ gpc->pre_shrink_free(page, hpage_cnt);
+ for (i = 0; i < hpage_cnt; i++)
+ __free_pages(&page[i], 0);
+ return NULL;
+ }
+
for (i = 1; i < hpage_cnt; i++)
free_grouped_page(gpc, &page[i]);
return &page[0];
}
-int init_grouped_page_cache(struct grouped_page_cache *gpc, gfp_t gfp)
+int init_grouped_page_cache(struct grouped_page_cache *gpc, gfp_t gfp,
+ gpc_callback pre_add_to_cache,
+ gpc_callback pre_shrink_free)
{
int err = 0;
@@ -2442,6 +2460,8 @@ int init_grouped_page_cache(struct grouped_page_cache *gpc, gfp_t gfp)
if (err)
list_lru_destroy(&gpc->lru);
+ gpc->pre_add_to_cache = pre_add_to_cache;
+ gpc->pre_shrink_free = pre_shrink_free;
out:
return err;
}
@@ -926,7 +926,8 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
#ifdef CONFIG_PKS_PG_TABLES
static int __init pks_page_init(void)
{
- pks_page_en = !init_grouped_page_cache(&gpc_pks, GFP_KERNEL | PGTABLE_HIGHMEM);
+ pks_page_en = !init_grouped_page_cache(&gpc_pks, GFP_KERNEL | PGTABLE_HIGHMEM,
+ NULL, NULL);
out:
return !pks_page_en;
Future patches will need to set permissions on pages in the cache, so add
callbacks that let grouped page cache callers provide hooks for the component
to call when replenishing the cache or freeing pages via the shrinker.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
 arch/x86/include/asm/set_memory.h |  8 +++++++-
 arch/x86/mm/pat/set_memory.c      | 26 +++++++++++++++++++++++---
 arch/x86/mm/pgtable.c             |  3 ++-
 3 files changed, 32 insertions(+), 5 deletions(-)
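
For reference, a minimal sketch of how a caller might use the new hooks,
assuming it wants its cached pages mapped read-only while they sit in the
cache. The example_* names, the example_gpc instance, and the use of
set_memory_ro()/set_memory_rw() are illustrative assumptions and not part
of this patch:

/* Hypothetical caller, for illustration only */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/set_memory.h>

static struct grouped_page_cache example_gpc;

/* Runs on each batch of pages before they are added to the cache */
static int example_pre_add(struct page *page, unsigned int cnt)
{
	/* e.g. make the direct map alias of the pages read-only */
	return set_memory_ro((unsigned long)page_address(page), cnt);
}

/* Runs before the shrinker hands pages back to the page allocator */
static int example_pre_free(struct page *page, unsigned int cnt)
{
	/* restore default permissions so the allocator gets normal pages */
	return set_memory_rw((unsigned long)page_address(page), cnt);
}

static int __init example_init(void)
{
	return init_grouped_page_cache(&example_gpc, GFP_KERNEL,
				       example_pre_add, example_pre_free);
}

Passing NULL for either callback, as pks_page_init() does above, keeps the
existing behavior.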