Message ID | 20241205175000.3187069-7-willy@infradead.org (mailing list archive) |
---|---|
State | New |
Series | Add zpdesc memory descriptor for zswap.zpool |
On Thu, Dec 05, 2024 at 05:49:43PM +0000, Matthew Wilcox (Oracle) wrote:
> From: Alex Shi <alexs@kernel.org>
>
> Introduce a few helper functions to convert create_page_chain() to use
> zpdesc, then use zpdesc in replace_sub_page() too.
>
> Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Signed-off-by: Alex Shi <alexs@kernel.org>

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

> ---
>  mm/zpdesc.h   |   6 +++
>  mm/zsmalloc.c | 109 ++++++++++++++++++++++++++++++++------------------
>  2 files changed, 76 insertions(+), 39 deletions(-)
>
> diff --git a/mm/zpdesc.h b/mm/zpdesc.h
> index 937de815a4ac..0387f5771dc6 100644
> --- a/mm/zpdesc.h
> +++ b/mm/zpdesc.h
> @@ -110,4 +110,10 @@ static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
>  {
>  	return page_zpdesc(pfn_to_page(pfn));
>  }
> +
> +static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
> +					const struct movable_operations *mops)
> +{
> +	__SetPageMovable(zpdesc_page(zpdesc), mops);
> +}
>  #endif
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index af8a6cd6b814..6beb7cce4c31 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -246,6 +246,35 @@ struct zs_pool {
>  	atomic_t compaction_in_progress;
>  };
>
> +static inline void zpdesc_set_first(struct zpdesc *zpdesc)
> +{
> +	SetPagePrivate(zpdesc_page(zpdesc));
> +}
> +
> +static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
> +{
> +	inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
> +}
> +
> +static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
> +{
> +	dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
> +}
> +
> +static inline struct zpdesc *alloc_zpdesc(gfp_t gfp)
> +{
> +	struct page *page = alloc_page(gfp);
> +
> +	return page_zpdesc(page);
> +}
> +
> +static inline void free_zpdesc(struct zpdesc *zpdesc)
> +{
> +	struct page *page = zpdesc_page(zpdesc);
> +
> +	__free_page(page);
> +}
> +
>  struct zspage {
>  	struct {
>  		unsigned int huge:HUGE_BITS;
> @@ -955,35 +984,35 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
>  }
>
>  static void create_page_chain(struct size_class *class, struct zspage *zspage,
> -			      struct page *pages[])
> +			      struct zpdesc *zpdescs[])
>  {
>  	int i;
> -	struct page *page;
> -	struct page *prev_page = NULL;
> -	int nr_pages = class->pages_per_zspage;
> +	struct zpdesc *zpdesc;
> +	struct zpdesc *prev_zpdesc = NULL;
> +	int nr_zpdescs = class->pages_per_zspage;
>
>  	/*
>  	 * Allocate individual pages and link them together as:
> -	 * 1. all pages are linked together using page->index
> -	 * 2. each sub-page point to zspage using page->private
> +	 * 1. all pages are linked together using zpdesc->next
> +	 * 2. each sub-page point to zspage using zpdesc->zspage
>  	 *
> -	 * we set PG_private to identify the first page (i.e. no other sub-page
> +	 * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
>  	 * has this flag set).
>  	 */
> -	for (i = 0; i < nr_pages; i++) {
> -		page = pages[i];
> -		set_page_private(page, (unsigned long)zspage);
> -		page->index = 0;
> +	for (i = 0; i < nr_zpdescs; i++) {
> +		zpdesc = zpdescs[i];
> +		zpdesc->zspage = zspage;
> +		zpdesc->next = NULL;
>  		if (i == 0) {
> -			zspage->first_zpdesc = page_zpdesc(page);
> -			SetPagePrivate(page);
> +			zspage->first_zpdesc = zpdesc;
> +			zpdesc_set_first(zpdesc);
>  			if (unlikely(class->objs_per_zspage == 1 &&
>  					class->pages_per_zspage == 1))
>  				SetZsHugePage(zspage);
>  		} else {
> -			prev_page->index = (unsigned long)page;
> +			prev_zpdesc->next = zpdesc;
>  		}
> -		prev_page = page;
> +		prev_zpdesc = zpdesc;
>  	}
>  }
>
> @@ -995,7 +1024,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>  			gfp_t gfp)
>  {
>  	int i;
> -	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
> +	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
>  	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
>
>  	if (!zspage)
> @@ -1005,25 +1034,25 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>  	migrate_lock_init(zspage);
>
>  	for (i = 0; i < class->pages_per_zspage; i++) {
> -		struct page *page;
> +		struct zpdesc *zpdesc;
>
> -		page = alloc_page(gfp);
> -		if (!page) {
> +		zpdesc = alloc_zpdesc(gfp);
> +		if (!zpdesc) {
>  			while (--i >= 0) {
> -				dec_zone_page_state(pages[i], NR_ZSPAGES);
> -				__ClearPageZsmalloc(pages[i]);
> -				__free_page(pages[i]);
> +				zpdesc_dec_zone_page_state(zpdescs[i]);
> +				__ClearPageZsmalloc(zpdesc_page(zpdescs[i]));
> +				free_zpdesc(zpdescs[i]);
>  			}
>  			cache_free_zspage(pool, zspage);
>  			return NULL;
>  		}
> -		__SetPageZsmalloc(page);
> +		__SetPageZsmalloc(zpdesc_page(zpdesc));
>
> -		inc_zone_page_state(page, NR_ZSPAGES);
> -		pages[i] = page;
> +		zpdesc_inc_zone_page_state(zpdesc);
> +		zpdescs[i] = zpdesc;
>  	}
>
> -	create_page_chain(class, zspage, pages);
> +	create_page_chain(class, zspage, zpdescs);
>  	init_zspage(class, zspage);
>  	zspage->pool = pool;
>  	zspage->class = class->index;
> @@ -1744,26 +1773,28 @@ static void migrate_write_unlock(struct zspage *zspage)
>  static const struct movable_operations zsmalloc_mops;
>
>  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
> -			     struct page *newpage, struct page *oldpage)
> +			     struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
>  {
> -	struct page *page;
> -	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
> +	struct zpdesc *zpdesc;
> +	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
> +	unsigned int first_obj_offset;
>  	int idx = 0;
>
> -	page = get_first_page(zspage);
> +	zpdesc = get_first_zpdesc(zspage);
>  	do {
> -		if (page == oldpage)
> -			pages[idx] = newpage;
> +		if (zpdesc == oldzpdesc)
> +			zpdescs[idx] = newzpdesc;
>  		else
> -			pages[idx] = page;
> +			zpdescs[idx] = zpdesc;
>  		idx++;
> -	} while ((page = get_next_page(page)) != NULL);
> +	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
>
> -	create_page_chain(class, zspage, pages);
> -	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
> +	create_page_chain(class, zspage, zpdescs);
> +	first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
> +	set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
>  	if (unlikely(ZsHugePage(zspage)))
> -		newpage->index = oldpage->index;
> -	__SetPageMovable(newpage, &zsmalloc_mops);
> +		newzpdesc->handle = oldzpdesc->handle;
> +	__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
>  }
>
>  static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
> @@ -1836,7 +1867,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
>  	}
>  	kunmap_local(s_addr);
>
> -	replace_sub_page(class, zspage, newpage, page);
> +	replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
>  	/*
>  	 * Since we complete the data copy and set up new zspage structure,
>  	 * it's okay to release migration_lock.
> --
> 2.45.2
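As context for the review, the linking scheme this patch carries over is simple enough to show in isolation. Below is a minimal userspace sketch of the pattern create_page_chain() implements, using hypothetical stand-in types rather than the kernel's `struct zpdesc` (which overlays `struct page` and marks the head with PG_private): every descriptor points back at its owning zspage, descriptors are threaded through `->next`, and only the first one is flagged.

```c
/*
 * Userspace sketch of the create_page_chain() linking scheme.
 * "struct zpdesc" here is a stand-in, not the kernel's definition:
 * the real descriptor overlays struct page and uses PG_private
 * rather than an explicit "first" field.
 */
#include <stdio.h>

struct zspage;                          /* opaque owner, as in zsmalloc */

struct zpdesc {
        struct zspage *zspage;          /* 2. each sub-page points to its zspage */
        struct zpdesc *next;            /* 1. sub-pages linked into a chain */
        int first;                      /* stand-in for SetPagePrivate() */
};

/* Mirrors the loop in create_page_chain(): thread descriptors together. */
static void create_chain(struct zspage *zspage, struct zpdesc *zpdescs[],
                         int nr_zpdescs)
{
        struct zpdesc *prev = NULL;

        for (int i = 0; i < nr_zpdescs; i++) {
                struct zpdesc *zpdesc = zpdescs[i];

                zpdesc->zspage = zspage;
                zpdesc->next = NULL;
                if (i == 0)
                        zpdesc->first = 1;      /* head of the chain */
                else
                        prev->next = zpdesc;
                prev = zpdesc;
        }
}

int main(void)
{
        struct zpdesc a = {0}, b = {0}, c = {0};
        struct zpdesc *descs[] = { &a, &b, &c };

        create_chain(NULL, descs, 3);
        for (struct zpdesc *cur = &a; cur; cur = cur->next)
                printf("desc %p first=%d\n", (void *)cur, cur->first);
        return 0;
}
```

The point of the conversion is visible here: the two relationships get named fields (`zpdesc->next`, `zpdesc->zspage`) instead of being smuggled through `page->index` and `page->private`.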
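The error handling in alloc_zspage() follows the usual all-or-nothing pattern: allocate pages_per_zspage descriptors one by one, and if any allocation fails, walk backwards freeing everything acquired so far before giving up. A hedged sketch of that shape, with malloc()/free() standing in for alloc_zpdesc()/free_zpdesc() and the NR_ZSPAGES accounting elided (names and sizes here are illustrative, not the kernel API):

```c
/* All-or-nothing allocation with backward unwind, as in alloc_zspage(). */
#include <stdlib.h>

static int alloc_all_or_nothing(void *slots[], int n)
{
        for (int i = 0; i < n; i++) {
                slots[i] = malloc(64);          /* stand-in for alloc_zpdesc(gfp) */
                if (!slots[i]) {
                        while (--i >= 0)        /* unwind everything acquired so far */
                                free(slots[i]);
                        return -1;              /* caller frees the zspage and bails */
                }
        }
        return 0;
}

int main(void)
{
        void *slots[3];

        if (alloc_all_or_nothing(slots, 3) == 0)
                for (int i = 0; i < 3; i++)
                        free(slots[i]);
        return 0;
}
```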
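The migration path reuses the same chain builder. replace_sub_page() walks the existing chain, substitutes the old descriptor with the new one in a scratch array, and hands the array back to create_page_chain() to relink everything. A sketch of that pattern, reusing the stand-in types and create_chain() helper from the first sketch above; note the real function also copies first_obj_offset and, for huge zspages, the stored handle, which this sketch omits:

```c
/* Sketch of replace_sub_page(): collect the chain, swap in the
 * replacement, then rebuild the links with the same helper that
 * built them. Assumes struct zpdesc and create_chain() from the
 * first sketch. */
#define MAX_DESCS 4     /* stand-in for ZS_MAX_PAGES_PER_ZSPAGE */

static void replace_sub_desc(struct zspage *zspage, struct zpdesc *first,
                             struct zpdesc *newd, struct zpdesc *oldd)
{
        struct zpdesc *descs[MAX_DESCS] = { NULL, };
        int idx = 0;

        /* Collect the chain, swapping in the replacement descriptor. */
        for (struct zpdesc *cur = first; cur; cur = cur->next)
                descs[idx++] = (cur == oldd) ? newd : cur;

        /* Relink: newd now occupies oldd's slot in the chain. */
        create_chain(zspage, descs, idx);
}
```

Rebuilding through the one chain constructor, rather than splicing links by hand, keeps the replacement code from drifting out of sync with the linking scheme if it ever changes again.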