diff mbox series

[03/16] mm/migration: remove unneeded local variable page_lru

Message ID 20220304093409.25829-4-linmiaohe@huawei.com (mailing list archive)
State New
Headers show
Series A few cleanup and fixup patches for migration | expand

Commit Message

Miaohe Lin March 4, 2022, 9:33 a.m. UTC
We can use page_is_file_lru() directly to help account the isolated
pages, which simplifies the code a bit, similar to the removal of the
local variable follflags.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 mm/migrate.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

Comments

Alistair Popple March 7, 2022, 10:58 a.m. UTC | #1
Miaohe Lin <linmiaohe@huawei.com> writes:

> We can use page_is_file_lru() directly to help account the isolated
> pages to simplify the code a bit as same as local variable follflags.

Looks good, but there are two independent changes here. Even though they are
small they should probably be split into two patches.

> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
>  mm/migrate.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index bc1867a5706c..da5a81052468 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1609,7 +1609,6 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
>  {
>  	struct vm_area_struct *vma;
>  	struct page *page;
> -	unsigned int follflags;
>  	int err;
>
>  	mmap_read_lock(mm);
> @@ -1619,8 +1618,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
>  		goto out;
>
>  	/* FOLL_DUMP to ignore special (like zero) pages */
> -	follflags = FOLL_GET | FOLL_DUMP;
> -	page = follow_page(vma, addr, follflags);
> +	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
>
>  	err = PTR_ERR(page);
>  	if (IS_ERR(page))
> @@ -2033,7 +2031,6 @@ static struct page *alloc_misplaced_dst_page_thp(struct page *page,
>
>  static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
>  {
> -	int page_lru;
>  	int nr_pages = thp_nr_pages(page);
>  	int order = compound_order(page);
>
> @@ -2060,8 +2057,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
>  	if (isolate_lru_page(page))
>  		return 0;
>
> -	page_lru = page_is_file_lru(page);
> -	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
> +	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
>  			    nr_pages);
>
>  	/*
Miaohe Lin March 8, 2022, 11:29 a.m. UTC | #2
On 2022/3/7 18:58, Alistair Popple wrote:
> Miaohe Lin <linmiaohe@huawei.com> writes:
> 
>> We can use page_is_file_lru() directly to help account the isolated
>> pages to simplify the code a bit as same as local variable follflags.
> 
> Looks good, but there are two independent changes here. Even though they are
> small they should probably be split into two patches.

Sounds reasonable. Will try to do this in v2. Thanks.

> 
>> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
>> ---
>>  mm/migrate.c | 8 ++------
>>  1 file changed, 2 insertions(+), 6 deletions(-)
>>
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index bc1867a5706c..da5a81052468 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -1609,7 +1609,6 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
>>  {
>>  	struct vm_area_struct *vma;
>>  	struct page *page;
>> -	unsigned int follflags;
>>  	int err;
>>
>>  	mmap_read_lock(mm);
>> @@ -1619,8 +1618,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
>>  		goto out;
>>
>>  	/* FOLL_DUMP to ignore special (like zero) pages */
>> -	follflags = FOLL_GET | FOLL_DUMP;
>> -	page = follow_page(vma, addr, follflags);
>> +	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
>>
>>  	err = PTR_ERR(page);
>>  	if (IS_ERR(page))
>> @@ -2033,7 +2031,6 @@ static struct page *alloc_misplaced_dst_page_thp(struct page *page,
>>
>>  static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
>>  {
>> -	int page_lru;
>>  	int nr_pages = thp_nr_pages(page);
>>  	int order = compound_order(page);
>>
>> @@ -2060,8 +2057,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
>>  	if (isolate_lru_page(page))
>>  		return 0;
>>
>> -	page_lru = page_is_file_lru(page);
>> -	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
>> +	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
>>  			    nr_pages);
>>
>>  	/*
diff mbox series

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index bc1867a5706c..da5a81052468 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1609,7 +1609,6 @@  static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 {
 	struct vm_area_struct *vma;
 	struct page *page;
-	unsigned int follflags;
 	int err;
 
 	mmap_read_lock(mm);
@@ -1619,8 +1618,7 @@  static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 		goto out;
 
 	/* FOLL_DUMP to ignore special (like zero) pages */
-	follflags = FOLL_GET | FOLL_DUMP;
-	page = follow_page(vma, addr, follflags);
+	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 
 	err = PTR_ERR(page);
 	if (IS_ERR(page))
@@ -2033,7 +2031,6 @@  static struct page *alloc_misplaced_dst_page_thp(struct page *page,
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
-	int page_lru;
 	int nr_pages = thp_nr_pages(page);
 	int order = compound_order(page);
 
@@ -2060,8 +2057,7 @@  static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	if (isolate_lru_page(page))
 		return 0;
 
-	page_lru = page_is_file_lru(page);
-	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
+	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
 			    nr_pages);
 
 	/*