Message ID | 20241205175000.3187069-18-willy@infradead.org (mailing list archive)
---|---
State | New
Series | Add zpdesc memory descriptor for zswap.zpool
On Thu, Dec 05, 2024 at 05:49:54PM +0000, Matthew Wilcox (Oracle) wrote:
> From: Alex Shi <alexs@kernel.org>
>
> Now that all users of get/set_first_obj_offset() are converted
> to use zpdesc, convert them to take zpdesc.
>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Signed-off-by: Alex Shi <alexs@kernel.org>
> ---
>  mm/zsmalloc.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 16e3d6705563..a15bdcc25bb3 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -496,20 +496,20 @@ static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
>
>  #define FIRST_OBJ_PAGE_TYPE_MASK	0xffffff
>
> -static inline unsigned int get_first_obj_offset(struct page *page)
> +static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc)
>  {
> -	VM_WARN_ON_ONCE(!PageZsmalloc(page));
> -	return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
> +	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
> +	return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK;
>  }
>
> -static inline void set_first_obj_offset(struct page *page, unsigned int offset)
> +static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset)
>  {
> -	/* With 24 bits available, we can support offsets into 16 MiB pages. */
> -	BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
> -	VM_WARN_ON_ONCE(!PageZsmalloc(page));
> +	/* With 16 bit available, we can support offsets into 64 KiB pages. */
> +	BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
> +	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));

It seems to be a mistake that occurred during the rebase process from v6 to v7?

>  	VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
> -	page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
> -	page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
> +	zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK;
> +	zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
>  }
>
>  static inline unsigned int get_freeobj(struct zspage *zspage)
> @@ -929,7 +929,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
>  		struct link_free *link;
>  		void *vaddr;
>
> -		set_first_obj_offset(zpdesc_page(zpdesc), off);
> +		set_first_obj_offset(zpdesc, off);
>
>  		vaddr = kmap_local_zpdesc(zpdesc);
>  		link = (struct link_free *)vaddr + off / sizeof(*link);
> @@ -1574,7 +1574,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
>  	unsigned long handle = 0;
>  	void *addr = kmap_local_zpdesc(zpdesc);
>
> -	offset = get_first_obj_offset(zpdesc_page(zpdesc));
> +	offset = get_first_obj_offset(zpdesc);
>  	offset += class->size * index;
>
>  	while (offset < PAGE_SIZE) {
> @@ -1769,8 +1769,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
>  	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
>
>  	create_page_chain(class, zspage, zpdescs);
> -	first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
> -	set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
> +	first_obj_offset = get_first_obj_offset(oldzpdesc);
> +	set_first_obj_offset(newzpdesc, first_obj_offset);
>  	if (unlikely(ZsHugePage(zspage)))
>  		newzpdesc->handle = oldzpdesc->handle;
>  	__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
> @@ -1825,7 +1825,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
>  	/* the migrate_write_lock protects zpage access via zs_map_object */
>  	migrate_write_lock(zspage);
>
> -	offset = get_first_obj_offset(zpdesc_page(zpdesc));
> +	offset = get_first_obj_offset(zpdesc);
>  	s_addr = kmap_local_zpdesc(zpdesc);
>
>  	/*
> --
> 2.45.2
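For context, FIRST_OBJ_PAGE_TYPE_MASK stays 0xffffff in the patch, i.e. a 24-bit field whose largest value is 16 MiB - 1, which is the arithmetic behind the v6 wording ("24 bits ... 16 MiB pages" paired with BUILD_BUG_ON(PAGE_SIZE > SZ_16M)). Below is a minimal user-space sketch of that arithmetic only; it is not kernel code, and SZ_64K/SZ_16M are defined locally to mirror the values from <linux/sizes.h>.

/*
 * Standalone check: a 24-bit offset field covers every in-page offset
 * for page sizes up to 16 MiB, while a 16-bit field only reaches 64 KiB.
 */
#include <assert.h>
#include <stdio.h>

#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff	/* 24 bits, as in the patch */
#define SZ_64K (64UL * 1024)
#define SZ_16M (16UL * 1024 * 1024)

int main(void)
{
	/* Largest value a 24-bit field can hold is 16 MiB - 1 ... */
	assert(FIRST_OBJ_PAGE_TYPE_MASK == SZ_16M - 1);
	/* ... whereas a 16-bit field would top out at 64 KiB - 1. */
	assert(0xffff == SZ_64K - 1);

	printf("24-bit mask covers offsets up to %lu bytes (16 MiB)\n",
	       (unsigned long)FIRST_OBJ_PAGE_TYPE_MASK + 1);
	return 0;
}

So if the mask is still 24 bits wide, the SZ_16M bound (and its comment) would be the matching one, consistent with the question above about a v6-to-v7 rebase slip.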