| Message ID | 20240408194232.118537-6-willy@infradead.org (mailing list archive) |
|---|---|
| State | New |
| Headers | show |
| Series | Some cleanups for memory-failure (expand) |
On 4/8/2024 12:42 PM, Matthew Wilcox (Oracle) wrote: > Removes two calls to compound_head(). Move the prototype to > internal.h; we definitely don't want code outside mm using it. > > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> > --- > include/linux/mm.h | 1 - > mm/hwpoison-inject.c | 11 ++++++----- > mm/internal.h | 1 + > mm/memory-failure.c | 15 ++++++++++----- > 4 files changed, 17 insertions(+), 11 deletions(-) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index b9173e230804..19a67f45907b 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -4011,7 +4011,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, > extern int memory_failure(unsigned long pfn, int flags); > extern void memory_failure_queue_kick(int cpu); > extern int unpoison_memory(unsigned long pfn); > -extern void shake_page(struct page *p); > extern atomic_long_t num_poisoned_pages __read_mostly; > extern int soft_offline_page(unsigned long pfn, int flags); > #ifdef CONFIG_MEMORY_FAILURE > diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c > index d0548e382b6b..c9d653f51e45 100644 > --- a/mm/hwpoison-inject.c > +++ b/mm/hwpoison-inject.c > @@ -15,7 +15,7 @@ static int hwpoison_inject(void *data, u64 val) > { > unsigned long pfn = val; > struct page *p; > - struct page *hpage; > + struct folio *folio; > int err; > > if (!capable(CAP_SYS_ADMIN)) > @@ -25,16 +25,17 @@ static int hwpoison_inject(void *data, u64 val) > return -ENXIO; > > p = pfn_to_page(pfn); > - hpage = compound_head(p); > + folio = page_folio(p); > > if (!hwpoison_filter_enable) > goto inject; > > - shake_page(hpage); > + shake_folio(folio); > /* > * This implies unable to support non-LRU pages except free page. 
> */ > - if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p)) > + if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) && > + !is_free_buddy_page(p)) > return 0; > > /* > @@ -42,7 +43,7 @@ static int hwpoison_inject(void *data, u64 val) > * the targeted owner (or on a free page). > * memory_failure() will redo the check reliably inside page lock. > */ > - err = hwpoison_filter(hpage); > + err = hwpoison_filter(&folio->page); > if (err) > return 0; > > diff --git a/mm/internal.h b/mm/internal.h > index 57c1055d5568..14cdc9ccb582 100644 > --- a/mm/internal.h > +++ b/mm/internal.h > @@ -1010,6 +1010,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask) > /* > * mm/memory-failure.c > */ > +void shake_folio(struct folio *folio); > extern int hwpoison_filter(struct page *p); > > extern u32 hwpoison_filter_dev_major; > diff --git a/mm/memory-failure.c b/mm/memory-failure.c > index f94d85075ec1..2e64e132bba1 100644 > --- a/mm/memory-failure.c > +++ b/mm/memory-failure.c > @@ -357,20 +357,25 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) > * Unknown page type encountered. Try to check whether it can turn PageLRU by > * lru_add_drain_all. > */ > -void shake_page(struct page *p) > +void shake_folio(struct folio *folio) > { > - if (PageHuge(p)) > + if (folio_test_hugetlb(folio)) > return; > /* > * TODO: Could shrink slab caches here if a lightweight range-based > * shrinker will be available. > */ > - if (PageSlab(p)) > + if (folio_test_slab(folio)) > return; > > lru_add_drain_all(); > } > -EXPORT_SYMBOL_GPL(shake_page); > +EXPORT_SYMBOL_GPL(shake_folio); > + > +static void shake_page(struct page *page) > +{ > + shake_folio(page_folio(page)); > +} > > static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, > unsigned long address) > @@ -1623,7 +1628,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, > * shake_page() again to ensure that it's flushed. 
> */ > if (mlocked) > - shake_page(hpage); > + shake_folio(folio); > > /* > * Now that the dirty bit has been propagated to the Looks good. Reviewed-by: Jane Chu <jane.chu@oracle.com> -jane
diff --git a/include/linux/mm.h b/include/linux/mm.h index b9173e230804..19a67f45907b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4011,7 +4011,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); -extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); #ifdef CONFIG_MEMORY_FAILURE diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index d0548e382b6b..c9d653f51e45 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -15,7 +15,7 @@ static int hwpoison_inject(void *data, u64 val) { unsigned long pfn = val; struct page *p; - struct page *hpage; + struct folio *folio; int err; if (!capable(CAP_SYS_ADMIN)) @@ -25,16 +25,17 @@ static int hwpoison_inject(void *data, u64 val) return -ENXIO; p = pfn_to_page(pfn); - hpage = compound_head(p); + folio = page_folio(p); if (!hwpoison_filter_enable) goto inject; - shake_page(hpage); + shake_folio(folio); /* * This implies unable to support non-LRU pages except free page. */ - if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p)) + if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) && + !is_free_buddy_page(p)) return 0; /* @@ -42,7 +43,7 @@ static int hwpoison_inject(void *data, u64 val) * the targeted owner (or on a free page). * memory_failure() will redo the check reliably inside page lock. 
*/ - err = hwpoison_filter(hpage); + err = hwpoison_filter(&folio->page); if (err) return 0; diff --git a/mm/internal.h b/mm/internal.h index 57c1055d5568..14cdc9ccb582 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1010,6 +1010,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask) /* * mm/memory-failure.c */ +void shake_folio(struct folio *folio); extern int hwpoison_filter(struct page *p); extern u32 hwpoison_filter_dev_major; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index f94d85075ec1..2e64e132bba1 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -357,20 +357,25 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) * Unknown page type encountered. Try to check whether it can turn PageLRU by * lru_add_drain_all. */ -void shake_page(struct page *p) +void shake_folio(struct folio *folio) { - if (PageHuge(p)) + if (folio_test_hugetlb(folio)) return; /* * TODO: Could shrink slab caches here if a lightweight range-based * shrinker will be available. */ - if (PageSlab(p)) + if (folio_test_slab(folio)) return; lru_add_drain_all(); } -EXPORT_SYMBOL_GPL(shake_page); +EXPORT_SYMBOL_GPL(shake_folio); + +static void shake_page(struct page *page) +{ + shake_folio(page_folio(page)); +} static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, unsigned long address) @@ -1623,7 +1628,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, * shake_page() again to ensure that it's flushed. */ if (mlocked) - shake_page(hpage); + shake_folio(folio); /* * Now that the dirty bit has been propagated to the
Removes two calls to compound_head(). Move the prototype to internal.h; we definitely don't want code outside mm using it. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> --- include/linux/mm.h | 1 - mm/hwpoison-inject.c | 11 ++++++----- mm/internal.h | 1 + mm/memory-failure.c | 15 ++++++++++----- 4 files changed, 17 insertions(+), 11 deletions(-)