[RFC,v2,21/21] mm/zsmalloc: convert {get,set}_first_obj_offset() to use zsdesc

Message ID 20230713042037.980211-22-42.hyeyoo@gmail.com (mailing list archive)
State New
Series mm/zsmalloc: Split zsdesc from struct page

Commit Message

Hyeonggon Yoo July 13, 2023, 4:20 a.m. UTC
Now that all callers of {get,set}_first_obj_offset() have a zsdesc at
hand, convert the helpers themselves to take a zsdesc and access
zsdesc->first_obj_offset instead of page->page_type.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
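
For readers following along, here is a minimal, userspace-compilable
sketch of the idea behind this conversion. The struct layouts and names
below (page_like, zsdesc_like) are simplified stand-ins, not the real
definitions: struct zsdesc is assumed, per the earlier patches in this
series, to overlay struct page so that first_obj_offset sits exactly
where page->page_type does, which is what lets these accessors switch
fields without changing behaviour.

/*
 * Illustrative sketch only -- not the kernel code. Models why
 * zsdesc->first_obj_offset can stand in for page->page_type once
 * struct zsdesc overlays struct page.
 */
#include <assert.h>
#include <stddef.h>

struct page_like {
	unsigned long flags;
	unsigned long _union_words[4];
	unsigned int page_type;		/* field zsmalloc reuses today */
	unsigned int refcount;
};

struct zsdesc_like {
	unsigned long flags;
	unsigned long _private[4];
	unsigned int first_obj_offset;	/* overlays page_type */
	unsigned int refcount;
};

/* The overlay only works if the two layouts line up exactly. */
static_assert(sizeof(struct zsdesc_like) == sizeof(struct page_like),
	      "zsdesc must not outgrow struct page");
static_assert(offsetof(struct zsdesc_like, first_obj_offset) ==
	      offsetof(struct page_like, page_type),
	      "first_obj_offset must overlay page_type");

/* Accessors in the new shape this patch introduces. */
static inline unsigned int get_first_obj_offset(const struct zsdesc_like *zsdesc)
{
	return zsdesc->first_obj_offset;
}

static inline void set_first_obj_offset(struct zsdesc_like *zsdesc,
					unsigned int offset)
{
	zsdesc->first_obj_offset = offset;
}

int main(void)
{
	struct zsdesc_like desc = { 0 };

	set_first_obj_offset(&desc, 64);
	return get_first_obj_offset(&desc) == 64 ? 0 : 1;
}

With the helpers in this shape, the hunks below simply drop the
zsdesc_page() round trip at each call site that already holds a zsdesc.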

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3933c023c3c9..7ac5d63e10a5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -606,14 +606,14 @@  static struct zsdesc *get_first_zsdesc(struct zspage *zspage)
 	return first_zsdesc;
 }
 
-static inline unsigned int get_first_obj_offset(struct page *page)
+static inline unsigned int get_first_obj_offset(struct zsdesc *zsdesc)
 {
-	return page->page_type;
+	return zsdesc->first_obj_offset;
 }
 
-static inline void set_first_obj_offset(struct page *page, unsigned int offset)
+static inline void set_first_obj_offset(struct zsdesc *zsdesc, unsigned int offset)
 {
-	page->page_type = offset;
+	zsdesc->first_obj_offset = offset;
 }
 
 static inline unsigned int get_freeobj(struct zspage *zspage)
@@ -1049,7 +1049,7 @@  static void init_zspage(struct size_class *class, struct zspage *zspage)
 		struct link_free *link;
 		void *vaddr;
 
-		set_first_obj_offset(zsdesc_page(zsdesc), off);
+		set_first_obj_offset(zsdesc, off);
 
 		vaddr = zsdesc_kmap_atomic(zsdesc);
 		link = (struct link_free *)vaddr + off / sizeof(*link);
@@ -1699,7 +1699,7 @@  static unsigned long find_alloced_obj(struct size_class *class,
 	unsigned long handle = 0;
 	void *addr = zsdesc_kmap_atomic(zsdesc);
 
-	offset = get_first_obj_offset(zsdesc_page(zsdesc));
+	offset = get_first_obj_offset(zsdesc);
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
@@ -1910,8 +1910,8 @@  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 	} while ((zsdesc = get_next_zsdesc(zsdesc)) != NULL);
 
 	create_page_chain(class, zspage, zsdescs);
-	first_obj_offset = get_first_obj_offset(zsdesc_page(old_zsdesc));
-	set_first_obj_offset(zsdesc_page(new_zsdesc), first_obj_offset);
+	first_obj_offset = get_first_obj_offset(old_zsdesc);
+	set_first_obj_offset(new_zsdesc, first_obj_offset);
 	if (unlikely(ZsHugePage(zspage)))
 		new_zsdesc->handle = old_zsdesc->handle;
 	zsdesc_set_movable(new_zsdesc);
@@ -1975,7 +1975,7 @@  static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* the migrate_write_lock protects zpage access via zs_map_object */
 	migrate_write_lock(zspage);
 
-	offset = get_first_obj_offset(zsdesc_page(zsdesc));
+	offset = get_first_obj_offset(zsdesc);
 	s_addr = zsdesc_kmap_atomic(zsdesc);
 
 	/*