
[v6,1/6] ksm: abstract the function try_to_get_old_rmap_item

Message ID 202302100916423431376@zte.com.cn (mailing list archive)
State New
Series ksm: support tracking KSM-placed zero-pages

Commit Message

Yang Yang Feb. 10, 2023, 1:16 a.m. UTC
From: xu xin <xu.xin16@zte.com.cn>

A new function, try_to_get_old_rmap_item, is abstracted out of
get_next_rmap_item. It will be reused by subsequent patches in this
series to count ksm_zero_pages.

This improves the readability and reusability of the KSM code.

Signed-off-by: xu xin <xu.xin16@zte.com.cn>
Cc: David Hildenbrand <david@redhat.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
Reviewed-by: Xiaokai Ran <ran.xiaokai@zte.com.cn>
Reviewed-by: Yang Yang <yang.yang29@zte.com.cn>

v5->v6:
Modify some comments according to David's suggestions.
---
 mm/ksm.c | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)
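
For context, the value of the split is that later patches in this series can look up an existing rmap_item for a given address without duplicating the list walk. A minimal sketch of such a caller follows; the function name, the flag and the exact ksm_zero_pages bookkeeping are assumptions for illustration, not code from this series:

	/* Hypothetical follow-up caller, for illustration only. */
	static void ksm_account_zero_page(unsigned long addr,
					  struct ksm_rmap_item **rmap_list)
	{
		struct ksm_rmap_item *rmap_item;

		rmap_item = try_to_get_old_rmap_item(addr, rmap_list);
		if (rmap_item) {
			/* Assumed bookkeeping for a KSM-placed zero page. */
			rmap_item->address |= ZERO_PAGE_FLAG;	/* hypothetical flag */
			ksm_zero_pages++;			/* assumed counter */
		}
	}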

Comments

Claudio Imbrenda March 7, 2023, 6:24 p.m. UTC | #1
On Fri, 10 Feb 2023 09:16:42 +0800 (CST)
<yang.yang29@zte.com.cn> wrote:

> From: xu xin <xu.xin16@zte.com.cn>
> 
> A new function, try_to_get_old_rmap_item, is abstracted out of
> get_next_rmap_item. It will be reused by subsequent patches in this
> series to count ksm_zero_pages.
> 
> This improves the readability and reusability of the KSM code.
> 
> Signed-off-by: xu xin <xu.xin16@zte.com.cn>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
> Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
> Reviewed-by: Xiaokai Ran <ran.xiaokai@zte.com.cn>
> Reviewed-by: Yang Yang <yang.yang29@zte.com.cn>
> 
> v5->v6:
> Modify some comments according to David's suggestions.
> ---
>  mm/ksm.c | 27 +++++++++++++++++++++------
>  1 file changed, 21 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 83e2f74ae7da..905a79d213da 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -2214,23 +2214,38 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
>  	}
>  }
> 
> -static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
> -					    struct ksm_rmap_item **rmap_list,
> -					    unsigned long addr)
> +static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr,
> +					 struct ksm_rmap_item **rmap_list)
>  {
> -	struct ksm_rmap_item *rmap_item;
> -
>  	while (*rmap_list) {
> -		rmap_item = *rmap_list;
> +		struct ksm_rmap_item *rmap_item = *rmap_list;

Why are you declaring this here? I think it's clearer if you
declare the variable at the beginning of the function, like it was
before.
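
That is, roughly the following shape, with the declaration back at the top of the function (a sketch of the suggested change, not code from the posted patch):

	static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr,
						 struct ksm_rmap_item **rmap_list)
	{
		struct ksm_rmap_item *rmap_item;

		while (*rmap_list) {
			rmap_item = *rmap_list;
			if ((rmap_item->address & PAGE_MASK) == addr)
				return rmap_item;
			if (rmap_item->address > addr)
				break;
			*rmap_list = rmap_item->rmap_list;
			remove_rmap_item_from_tree(rmap_item);
			free_rmap_item(rmap_item);
		}

		return NULL;
	}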

> +
>  		if ((rmap_item->address & PAGE_MASK) == addr)
>  			return rmap_item;
>  		if (rmap_item->address > addr)
>  			break;
>  		*rmap_list = rmap_item->rmap_list;
> +		/*
> +		 * If we end up here, the VMA is MADV_UNMERGEABLE or its page
> +		 * is ineligible or discarded, e.g. MADV_DONTNEED.
> +		 */
>  		remove_rmap_item_from_tree(rmap_item);
>  		free_rmap_item(rmap_item);
>  	}
> 
> +	return NULL;
> +}
> +
> +static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
> +					    struct ksm_rmap_item **rmap_list,
> +					    unsigned long addr)
> +{
> +	struct ksm_rmap_item *rmap_item;
> +
> +	rmap_item = try_to_get_old_rmap_item(addr, rmap_list);
> +	if (rmap_item)
> +		return rmap_item;
> +
>  	rmap_item = alloc_rmap_item();
>  	if (rmap_item) {
>  		/* It has already been zeroed */

Patch

diff --git a/mm/ksm.c b/mm/ksm.c
index 83e2f74ae7da..905a79d213da 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2214,23 +2214,38 @@  static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 	}
 }

-static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
-					    struct ksm_rmap_item **rmap_list,
-					    unsigned long addr)
+static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr,
+					 struct ksm_rmap_item **rmap_list)
 {
-	struct ksm_rmap_item *rmap_item;
-
 	while (*rmap_list) {
-		rmap_item = *rmap_list;
+		struct ksm_rmap_item *rmap_item = *rmap_list;
+
 		if ((rmap_item->address & PAGE_MASK) == addr)
 			return rmap_item;
 		if (rmap_item->address > addr)
 			break;
 		*rmap_list = rmap_item->rmap_list;
+		/*
+		 * If we end up here, the VMA is MADV_UNMERGEABLE or its page
+		 * is ineligible or discarded, e.g. MADV_DONTNEED.
+		 */
 		remove_rmap_item_from_tree(rmap_item);
 		free_rmap_item(rmap_item);
 	}

+	return NULL;
+}
+
+static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+					    struct ksm_rmap_item **rmap_list,
+					    unsigned long addr)
+{
+	struct ksm_rmap_item *rmap_item;
+
+	rmap_item = try_to_get_old_rmap_item(addr, rmap_list);
+	if (rmap_item)
+		return rmap_item;
+
 	rmap_item = alloc_rmap_item();
 	if (rmap_item) {
 		/* It has already been zeroed */
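
For readers who want to experiment with the lookup pattern outside the kernel: the helper walks a singly-linked list kept sorted by address, returns a match, frees stale entries that sort below the requested address, and stops once it has walked past it. A standalone model in plain userspace C (illustrative only; all names and the main() driver are made up, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		unsigned long address;
		struct item *next;
	};

	/*
	 * Same walk as try_to_get_old_rmap_item: return a match, prune stale
	 * entries below addr, stop once past addr.
	 */
	static struct item *lookup_old_item(unsigned long addr, struct item **list)
	{
		struct item *it;

		while (*list) {
			it = *list;
			if (it->address == addr)
				return it;
			if (it->address > addr)
				break;
			*list = it->next;	/* unlink the stale entry ... */
			free(it);		/* ... and free it */
		}
		return NULL;
	}

	static void push(struct item **list, unsigned long addr)
	{
		struct item *it = malloc(sizeof(*it));

		it->address = addr;
		it->next = *list;
		*list = it;
	}

	int main(void)
	{
		struct item *list = NULL;

		/* Build an address-sorted list: 0x1000 -> 0x2000 -> 0x3000. */
		push(&list, 0x3000);
		push(&list, 0x2000);
		push(&list, 0x1000);

		/* Finds 0x2000 and prunes the stale 0x1000 entry on the way. */
		printf("0x2000: %s\n", lookup_old_item(0x2000, &list) ? "hit" : "miss");
		/* 0x1000 is gone now, so the walk stops at 0x2000 and misses. */
		printf("0x1000: %s\n", lookup_old_item(0x1000, &list) ? "hit" : "miss");
		return 0;
	}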