Message ID | 20230220132218.546369-3-42.hyeyoo@gmail.com (mailing list archive) |
---|---|
State | New |
Series | mm/zsmalloc: Split zsdesc from struct page |
On Mon, Feb 20, 2023 at 01:21:55PM +0000, Hyeonggon Yoo wrote:
> Introduce utility functions for zsdesc to avoid directly accessing fields
> of struct page.
>
> zsdesc_page() is defined this way to preserve constness. page_zsdesc() does

I'd suggest "zsdesc_page() is defined with _Generic to preserve constness"
and add a line break before page_zsdesc().

> not call compound_head() because zsdesc is always a base page.
>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>  mm/zsmalloc.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 118 insertions(+), 2 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index e2e34992c439..4af9f87cafb7 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -332,6 +332,124 @@ ZSDESC_MATCH(_refcount, _refcount);
>  #undef ZSDESC_MATCH
>  static_assert(sizeof(struct zsdesc) <= sizeof(struct page));

I didn't do a thorough check if it's feasible, but I think it would be
better to add helpers along with their usage.

For instance, move the definitions of zsdesc_page() and page_zsdesc() to
"mm/zsmalloc: replace first_page to first_zsdesc in struct zspage" and so
on.

> +#define zsdesc_page(zdesc) (_Generic((zdesc),		\
> +        const struct zsdesc *: (const struct page *)zdesc,	\
> +        struct zsdesc *: (struct page *)zdesc))
> +
> +static inline struct zsdesc *page_zsdesc(struct page *page)
> +{
> +        return (struct zsdesc *)page;
> +}
> +
> +static inline unsigned long zsdesc_pfn(const struct zsdesc *zsdesc)
> +{
> +        return page_to_pfn(zsdesc_page(zsdesc));
> +}
> +
> +static inline struct zsdesc *pfn_zsdesc(unsigned long pfn)
> +{
> +        return page_zsdesc(pfn_to_page(pfn));
> +}
> +
> +static inline struct zspage *zsdesc_zspage(const struct zsdesc *zsdesc)
> +{
> +        return (struct zspage *)page_private(zsdesc_page(zsdesc));
> +}
> +
> +static inline int trylock_zsdesc(struct zsdesc *zsdesc)
> +{
> +        return trylock_page(zsdesc_page(zsdesc));
> +}
> +
> +static inline void unlock_zsdesc(struct zsdesc *zsdesc)
> +{
> +        return unlock_page(zsdesc_page(zsdesc));
> +}
> +
> +static inline struct zone *zsdesc_zone(struct zsdesc *zsdesc)
> +{
> +        return page_zone(zsdesc_page(zsdesc));
> +}
> +
> +static inline void wait_on_zsdesc_locked(struct zsdesc *zsdesc)
> +{
> +        wait_on_page_locked(zsdesc_page(zsdesc));
> +}
> +
> +static inline void zsdesc_get(struct zsdesc *zsdesc)
> +{
> +        struct folio *folio = (struct folio *)zsdesc;
> +
> +        folio_get(folio);
> +}
> +
> +static inline void zsdesc_put(struct zsdesc *zsdesc)
> +{
> +        struct folio *folio = (struct folio *)zsdesc;
> +
> +        folio_put(folio);
> +}
> +
> +static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
> +{
> +        return kmap_atomic(zsdesc_page(zsdesc));
> +}
> +
> +static inline void zsdesc_set_zspage(struct zsdesc *zsdesc, struct zspage *zspage)
> +{
> +        set_page_private(zsdesc_page(zsdesc), (unsigned long)zspage);
> +}
> +
> +static inline void zsdesc_set_first(struct zsdesc *zsdesc)
> +{
> +        SetPagePrivate(zsdesc_page(zsdesc));
> +}
> +
> +static inline bool zsdesc_is_locked(struct zsdesc *zsdesc)
> +{
> +        return PageLocked(zsdesc_page(zsdesc));
> +}
> +
> +static inline bool zsdesc_is_isolated(struct zsdesc *zsdesc)
> +{
> +        return PageIsolated(zsdesc_page(zsdesc));
> +}
> +
> +static inline void zsdesc_inc_zone_page_state(struct zsdesc *zsdesc)
> +{
> +        inc_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
> +}
> +
> +static inline void zsdesc_dec_zone_page_state(struct zsdesc *zsdesc)
> +{
> +        dec_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
> +}
> +
> +static inline struct zsdesc *alloc_zsdesc(gfp_t gfp)
> +{
> +        struct page *page = alloc_page(gfp);
> +
> +        zsdesc_inc_zone_page_state(page_zsdesc(page));
> +        return page_zsdesc(page);
> +}
> +
> +static inline void free_zsdesc(struct zsdesc *zsdesc)
> +{
> +        struct page *page = zsdesc_page(zsdesc);
> +
> +        zsdesc_dec_zone_page_state(page_zsdesc(page));
> +        __free_page(page);
> +}
> +
> +static const struct movable_operations zsmalloc_mops;
> +
> +static inline void zsdesc_set_movable(struct zsdesc *zsdesc)
> +{
> +        struct page *page = zsdesc_page(zsdesc);
> +
> +        __SetPageMovable(page, &zsmalloc_mops);
> +}
> +
>  /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
>  static void SetZsHugePage(struct zspage *zspage)
>  {
> @@ -2012,8 +2130,6 @@ static void dec_zspage_isolation(struct zspage *zspage)
>          zspage->isolated--;
>  }
>
> -static const struct movable_operations zsmalloc_mops;
> -
>  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
>                          struct page *newpage, struct page *oldpage)
>  {
> --
> 2.25.1
>
>

--
Sincerely yours,
Mike.
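For readers who have not used _Generic before, here is a minimal, self-contained userspace sketch of the constness-preserving selection that the commit message and the suggested wording above refer to. The struct definitions are stand-ins for illustration, not the kernel layouts; any C11 compiler (e.g. gcc -std=c11) accepts it.

/* Stand-in types for illustration only; not the kernel's struct layouts. */
#include <stdio.h>

struct page { unsigned long flags; };
struct zsdesc { unsigned long flags; };

/*
 * Same shape as the patch's macro: _Generic picks the association whose
 * type matches the argument, so a const pointer in yields a const
 * pointer out instead of silently casting the qualifier away.
 */
#define zsdesc_page(zdesc) (_Generic((zdesc),				\
        const struct zsdesc *: (const struct page *)(zdesc),		\
        struct zsdesc *: (struct page *)(zdesc)))

int main(void)
{
        struct zsdesc desc = { .flags = 0 };
        const struct zsdesc *cdesc = &desc;

        const struct page *cpage = zsdesc_page(cdesc);  /* const in, const out */
        struct page *page = zsdesc_page(&desc);         /* non-const in, non-const out */

        /* "cpage->flags = 1;" would be rejected by the compiler,
         * while "page->flags = 1;" compiles fine. */
        printf("%p %p\n", (const void *)cpage, (void *)page);
        return 0;
}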
On Mon, Feb 27, 2023 at 05:30:14PM +0200, Mike Rapoport wrote:
> On Mon, Feb 20, 2023 at 01:21:55PM +0000, Hyeonggon Yoo wrote:
> > Introduce utility functions for zsdesc to avoid directly accessing fields
> > of struct page.
> >
> > zsdesc_page() is defined this way to preserve constness. page_zsdesc() does
>
> I'd suggest "zsdesc_page() is defined with _Generic to preserve constness"
> and add a line break before page_zsdesc().

Will do in next version.

> > not call compound_head() because zsdesc is always a base page.
> >
> > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> > ---
> >  mm/zsmalloc.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 118 insertions(+), 2 deletions(-)
> >
> > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> > index e2e34992c439..4af9f87cafb7 100644
> > --- a/mm/zsmalloc.c
> > +++ b/mm/zsmalloc.c
> > @@ -332,6 +332,124 @@ ZSDESC_MATCH(_refcount, _refcount);
> >  #undef ZSDESC_MATCH
> >  static_assert(sizeof(struct zsdesc) <= sizeof(struct page));
>
> I didn't do a thorough check if it's feasible, but I think it would be
> better to add helpers along with their usage.
>
> For instance, move the definitions of zsdesc_page() and page_zsdesc() to
> "mm/zsmalloc: replace first_page to first_zsdesc in struct zspage" and so
> on.

Sure, that would be easier to review.
Will do in next version.

Thanks!
Hyeonggon

> > +#define zsdesc_page(zdesc) (_Generic((zdesc),		\
> > +        const struct zsdesc *: (const struct page *)zdesc,	\
> > +        struct zsdesc *: (struct page *)zdesc))
> > +
> > +static inline struct zsdesc *page_zsdesc(struct page *page)
> > +{
> > +        return (struct zsdesc *)page;
> > +}
> > +
> > +static inline unsigned long zsdesc_pfn(const struct zsdesc *zsdesc)
> > +{
> > +        return page_to_pfn(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline struct zsdesc *pfn_zsdesc(unsigned long pfn)
> > +{
> > +        return page_zsdesc(pfn_to_page(pfn));
> > +}
> > +
> > +static inline struct zspage *zsdesc_zspage(const struct zsdesc *zsdesc)
> > +{
> > +        return (struct zspage *)page_private(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline int trylock_zsdesc(struct zsdesc *zsdesc)
> > +{
> > +        return trylock_page(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline void unlock_zsdesc(struct zsdesc *zsdesc)
> > +{
> > +        return unlock_page(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline struct zone *zsdesc_zone(struct zsdesc *zsdesc)
> > +{
> > +        return page_zone(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline void wait_on_zsdesc_locked(struct zsdesc *zsdesc)
> > +{
> > +        wait_on_page_locked(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline void zsdesc_get(struct zsdesc *zsdesc)
> > +{
> > +        struct folio *folio = (struct folio *)zsdesc;
> > +
> > +        folio_get(folio);
> > +}
> > +
> > +static inline void zsdesc_put(struct zsdesc *zsdesc)
> > +{
> > +        struct folio *folio = (struct folio *)zsdesc;
> > +
> > +        folio_put(folio);
> > +}
> > +
> > +static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
> > +{
> > +        return kmap_atomic(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline void zsdesc_set_zspage(struct zsdesc *zsdesc, struct zspage *zspage)
> > +{
> > +        set_page_private(zsdesc_page(zsdesc), (unsigned long)zspage);
> > +}
> > +
> > +static inline void zsdesc_set_first(struct zsdesc *zsdesc)
> > +{
> > +        SetPagePrivate(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline bool zsdesc_is_locked(struct zsdesc *zsdesc)
> > +{
> > +        return PageLocked(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline bool zsdesc_is_isolated(struct zsdesc *zsdesc)
> > +{
> > +        return PageIsolated(zsdesc_page(zsdesc));
> > +}
> > +
> > +static inline void zsdesc_inc_zone_page_state(struct zsdesc *zsdesc)
> > +{
> > +        inc_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
> > +}
> > +
> > +static inline void zsdesc_dec_zone_page_state(struct zsdesc *zsdesc)
> > +{
> > +        dec_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
> > +}
> > +
> > +static inline struct zsdesc *alloc_zsdesc(gfp_t gfp)
> > +{
> > +        struct page *page = alloc_page(gfp);
> > +
> > +        zsdesc_inc_zone_page_state(page_zsdesc(page));
> > +        return page_zsdesc(page);
> > +}
> > +
> > +static inline void free_zsdesc(struct zsdesc *zsdesc)
> > +{
> > +        struct page *page = zsdesc_page(zsdesc);
> > +
> > +        zsdesc_dec_zone_page_state(page_zsdesc(page));
> > +        __free_page(page);
> > +}
> > +
> > +static const struct movable_operations zsmalloc_mops;
> > +
> > +static inline void zsdesc_set_movable(struct zsdesc *zsdesc)
> > +{
> > +        struct page *page = zsdesc_page(zsdesc);
> > +
> > +        __SetPageMovable(page, &zsmalloc_mops);
> > +}
> > +
> >  /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
> >  static void SetZsHugePage(struct zspage *zspage)
> >  {
> > @@ -2012,8 +2130,6 @@ static void dec_zspage_isolation(struct zspage *zspage)
> >          zspage->isolated--;
> >  }
> >
> > -static const struct movable_operations zsmalloc_mops;
> > -
> >  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
> >                          struct page *newpage, struct page *oldpage)
> >  {
> > --
> > 2.25.1
> >
> >
>
> --
> Sincerely yours,
> Mike.
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e2e34992c439..4af9f87cafb7 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -332,6 +332,124 @@ ZSDESC_MATCH(_refcount, _refcount);
 #undef ZSDESC_MATCH
 static_assert(sizeof(struct zsdesc) <= sizeof(struct page));
 
+#define zsdesc_page(zdesc) (_Generic((zdesc),		\
+        const struct zsdesc *: (const struct page *)zdesc,	\
+        struct zsdesc *: (struct page *)zdesc))
+
+static inline struct zsdesc *page_zsdesc(struct page *page)
+{
+        return (struct zsdesc *)page;
+}
+
+static inline unsigned long zsdesc_pfn(const struct zsdesc *zsdesc)
+{
+        return page_to_pfn(zsdesc_page(zsdesc));
+}
+
+static inline struct zsdesc *pfn_zsdesc(unsigned long pfn)
+{
+        return page_zsdesc(pfn_to_page(pfn));
+}
+
+static inline struct zspage *zsdesc_zspage(const struct zsdesc *zsdesc)
+{
+        return (struct zspage *)page_private(zsdesc_page(zsdesc));
+}
+
+static inline int trylock_zsdesc(struct zsdesc *zsdesc)
+{
+        return trylock_page(zsdesc_page(zsdesc));
+}
+
+static inline void unlock_zsdesc(struct zsdesc *zsdesc)
+{
+        return unlock_page(zsdesc_page(zsdesc));
+}
+
+static inline struct zone *zsdesc_zone(struct zsdesc *zsdesc)
+{
+        return page_zone(zsdesc_page(zsdesc));
+}
+
+static inline void wait_on_zsdesc_locked(struct zsdesc *zsdesc)
+{
+        wait_on_page_locked(zsdesc_page(zsdesc));
+}
+
+static inline void zsdesc_get(struct zsdesc *zsdesc)
+{
+        struct folio *folio = (struct folio *)zsdesc;
+
+        folio_get(folio);
+}
+
+static inline void zsdesc_put(struct zsdesc *zsdesc)
+{
+        struct folio *folio = (struct folio *)zsdesc;
+
+        folio_put(folio);
+}
+
+static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
+{
+        return kmap_atomic(zsdesc_page(zsdesc));
+}
+
+static inline void zsdesc_set_zspage(struct zsdesc *zsdesc, struct zspage *zspage)
+{
+        set_page_private(zsdesc_page(zsdesc), (unsigned long)zspage);
+}
+
+static inline void zsdesc_set_first(struct zsdesc *zsdesc)
+{
+        SetPagePrivate(zsdesc_page(zsdesc));
+}
+
+static inline bool zsdesc_is_locked(struct zsdesc *zsdesc)
+{
+        return PageLocked(zsdesc_page(zsdesc));
+}
+
+static inline bool zsdesc_is_isolated(struct zsdesc *zsdesc)
+{
+        return PageIsolated(zsdesc_page(zsdesc));
+}
+
+static inline void zsdesc_inc_zone_page_state(struct zsdesc *zsdesc)
+{
+        inc_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
+}
+
+static inline void zsdesc_dec_zone_page_state(struct zsdesc *zsdesc)
+{
+        dec_zone_page_state(zsdesc_page(zsdesc), NR_ZSPAGES);
+}
+
+static inline struct zsdesc *alloc_zsdesc(gfp_t gfp)
+{
+        struct page *page = alloc_page(gfp);
+
+        zsdesc_inc_zone_page_state(page_zsdesc(page));
+        return page_zsdesc(page);
+}
+
+static inline void free_zsdesc(struct zsdesc *zsdesc)
+{
+        struct page *page = zsdesc_page(zsdesc);
+
+        zsdesc_dec_zone_page_state(page_zsdesc(page));
+        __free_page(page);
+}
+
+static const struct movable_operations zsmalloc_mops;
+
+static inline void zsdesc_set_movable(struct zsdesc *zsdesc)
+{
+        struct page *page = zsdesc_page(zsdesc);
+
+        __SetPageMovable(page, &zsmalloc_mops);
+}
+
 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
 static void SetZsHugePage(struct zspage *zspage)
 {
@@ -2012,8 +2130,6 @@ static void dec_zspage_isolation(struct zspage *zspage)
         zspage->isolated--;
 }
 
-static const struct movable_operations zsmalloc_mops;
-
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
                         struct page *newpage, struct page *oldpage)
 {
Introduce utility functions for zsdesc to avoid directly accessing fields
of struct page.

zsdesc_page() is defined this way to preserve constness. page_zsdesc() does
not call compound_head() because zsdesc is always a base page.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 118 insertions(+), 2 deletions(-)
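To illustrate the intent of the helpers, here is a hedged before/after sketch of the kind of conversion later patches in the series would perform. It is not code taken from the series; the inspect_subpage*() names are hypothetical, while get_page()/put_page() are existing page APIs and zsdesc_get()/zsdesc_put() come from this patch.

/* Before: zsmalloc code pins a sub-page through raw struct page APIs. */
static void inspect_subpage(struct page *page)
{
        get_page(page);         /* pin the page while looking at it */
        /* ... examine the object ... */
        put_page(page);
}

/* After: the same logic goes through the zsdesc helpers and no longer
 * touches struct page directly. */
static void inspect_subpage_zsdesc(struct zsdesc *zsdesc)
{
        zsdesc_get(zsdesc);     /* pin the underlying page */
        /* ... examine the object ... */
        zsdesc_put(zsdesc);
}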