diff mbox series

[RFC,v2,01/21] mm/zsmalloc: create new struct zsdesc

Message ID 20230713042037.980211-2-42.hyeyoo@gmail.com (mailing list archive)
State New
Headers show
Series mm/zsmalloc: Split zsdesc from struct page | expand

Commit Message

Hyeonggon Yoo July 13, 2023, 4:20 a.m. UTC
Currently zsmalloc reuses fields of struct page. As part of simplifying
struct page, create zsmalloc's own type, called zsdesc.

Remove comments about how zsmalloc reuses fields of struct page, because
zsdesc uses more intuitive names.

Note that zsmalloc does not use PG_owner_priv_1 after commit a41ec880aa7b
("zsmalloc: move huge compressed obj from page to zspage"). Thus only
document how zsmalloc uses the PG_private flag.

It is very tempting to rearrange zsdesc, but the three words after the
flags field are not available for zsmalloc. Add comments about that.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 63 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 17 deletions(-)

Comments

Sergey Senozhatsky July 20, 2023, 7:47 a.m. UTC | #1
On (23/07/13 13:20), Hyeonggon Yoo wrote:
>  /*
> @@ -264,6 +247,52 @@ struct mapping_area {
>  	enum zs_mapmode vm_mm; /* mapping mode */
>  };
>  

struct zspage already has a zsdesc member at this point, so I'd prefer
to move struct zsdesc definition before struct zspage.

> +/*
> + * struct zsdesc - memory descriptor for zsmalloc memory
> + *
> + * This struct overlays struct page for now. Do not modify without a
> + * good understanding of the issues.
> + *
> + * Usage of struct page flags on zsdesc:
> + *	PG_private: identifies the first component zsdesc
> + */
> +struct zsdesc {
> +	unsigned long flags;
> +
> +	/*
> +	 * Although not used by zsmalloc, this field is used by non-LRU page migration
> +	 * code. Leave it unused.
> +	 */
> +	struct list_head lru;
> +
> +	/* Always points to zsmalloc_mops with PAGE_MAPPING_MOVABLE set */
> +	struct movable_operations *mops;
> +
> +	union {
> +		/* linked list of all zsdescs in a zspage */
> +		struct zsdesc *next;
> +		/* for huge zspages */
> +		unsigned long handle;
> +	};
> +	struct zspage *zspage;
> +	unsigned int first_obj_offset;
> +	unsigned int _refcount;
> +};
> +
> +#define ZSDESC_MATCH(pg, zs) \
> +	static_assert(offsetof(struct page, pg) == offsetof(struct zsdesc, zs))
> +
> +ZSDESC_MATCH(flags, flags);
> +ZSDESC_MATCH(lru, lru);
> +ZSDESC_MATCH(mapping, mops);
> +ZSDESC_MATCH(index, next);
> +ZSDESC_MATCH(index, handle);
> +ZSDESC_MATCH(private, zspage);
> +ZSDESC_MATCH(page_type, first_obj_offset);
> +ZSDESC_MATCH(_refcount, _refcount);
> +#undef ZSDESC_MATCH
> +static_assert(sizeof(struct zsdesc) <= sizeof(struct page));
> +
>  /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
>  static void SetZsHugePage(struct zspage *zspage)
>  {
> -- 
> 2.41.0
>
diff mbox series

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 32f5bc4074df..2204bea4f289 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -11,23 +11,6 @@ 
  * Released under the terms of GNU General Public License Version 2.0
  */
 
-/*
- * Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
- *
- * Usage of struct page fields:
- *	page->private: points to zspage
- *	page->index: links together all component pages of a zspage
- *		For the huge page, this is always 0, so we use this field
- *		to store handle.
- *	page->page_type: first object offset in a subpage of zspage
- *
- * Usage of struct page flags:
- *	PG_private: identifies the first component page
- *	PG_owner_priv_1: identifies the huge component page
- *
- */
-
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /*
@@ -264,6 +247,52 @@  struct mapping_area {
 	enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/*
+ * struct zsdesc - memory descriptor for zsmalloc memory
+ *
+ * This struct overlays struct page for now. Do not modify without a
+ * good understanding of the issues.
+ *
+ * Usage of struct page flags on zsdesc:
+ *	PG_private: identifies the first component zsdesc
+ */
+struct zsdesc {
+	unsigned long flags;
+
+	/*
+	 * Although not used by zsmalloc, this field is used by non-LRU page migration
+	 * code. Leave it unused.
+	 */
+	struct list_head lru;
+
+	/* Always points to zsmalloc_mops with PAGE_MAPPING_MOVABLE set */
+	struct movable_operations *mops;
+
+	union {
+		/* linked list of all zsdescs in a zspage */
+		struct zsdesc *next;
+		/* for huge zspages */
+		unsigned long handle;
+	};
+	struct zspage *zspage;
+	unsigned int first_obj_offset;
+	unsigned int _refcount;
+};
+
+#define ZSDESC_MATCH(pg, zs) \
+	static_assert(offsetof(struct page, pg) == offsetof(struct zsdesc, zs))
+
+ZSDESC_MATCH(flags, flags);
+ZSDESC_MATCH(lru, lru);
+ZSDESC_MATCH(mapping, mops);
+ZSDESC_MATCH(index, next);
+ZSDESC_MATCH(index, handle);
+ZSDESC_MATCH(private, zspage);
+ZSDESC_MATCH(page_type, first_obj_offset);
+ZSDESC_MATCH(_refcount, _refcount);
+#undef ZSDESC_MATCH
+static_assert(sizeof(struct zsdesc) <= sizeof(struct page));
+
 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
 static void SetZsHugePage(struct zspage *zspage)
 {