
[2/4] mm: Move nr_deactivate accounting to shrink_active_list()

Message ID 154998444590.18704.9387109537711017589.stgit@localhost.localdomain (mailing list archive)
State New, archived
Series mm: Generalize putback functions

Commit Message

Kirill Tkhai Feb. 12, 2019, 3:14 p.m. UTC
We know, which LRU is not active.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/vmscan.c |   10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
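
In short: move_active_pages_to_lru() no longer needs to test is_active_lru(),
because its only callers sit in shrink_active_list(), which passes the target
list explicitly and therefore already knows which of its two calls is the
deactivation. A condensed sketch of the resulting tail of shrink_active_list(),
taken directly from the hunks below (unrelated lines omitted):

	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);

	/*
	 * The second call always fills the inactive list, so the caller can
	 * account PGDEACTIVATE itself, without the removed is_active_lru() check.
	 */
	__count_vm_events(PGDEACTIVATE, nr_deactivate);
	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);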

Comments

Daniel Jordan Feb. 13, 2019, 7:13 p.m. UTC | #1
On Tue, Feb 12, 2019 at 06:14:05PM +0300, Kirill Tkhai wrote:
> We know, which LRU is not active.

s/,//

> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  mm/vmscan.c |   10 ++++------
>  1 file changed, 4 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 84542004a277..8d7d55e71511 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2040,12 +2040,6 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
>  		}
>  	}
>  
> -	if (!is_active_lru(lru)) {
> -		__count_vm_events(PGDEACTIVATE, nr_moved);
> -		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
> -				   nr_moved);
> -	}
> -
>  	return nr_moved;
>  }
>  
> @@ -2137,6 +2131,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  
>  	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
>  	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
> +
> +	__count_vm_events(PGDEACTIVATE, nr_deactivate);
> +	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);

Nice, you're using the irq-unsafe one since irqs are already disabled.  I guess
this was missed in c3cc39118c361.  Do you want to insert a patch before this
one that converts all instances of this pattern in vmscan.c over?

There's a similar oversight in lru_lazyfree_fn with count_memcg_page_event, but
that'd mean __count_memcg_page_event which is probably overkill.
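
For illustration, the conversion suggested above is mechanical: these call
sites already run with interrupts disabled under spin_lock_irq(&pgdat->lru_lock),
so the irq-safe wrapper (which disables and re-enables interrupts itself) can be
swapped for the irq-unsafe __-prefixed variant. A minimal sketch of that
pattern, not part of this series; the surrounding lines are only context:

	spin_lock_irq(&pgdat->lru_lock);
	...
-	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_moved);
+	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_moved);
	...
	spin_unlock_irq(&pgdat->lru_lock);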

Kirill Tkhai Feb. 14, 2019, 10:30 a.m. UTC | #2
On 13.02.2019 22:13, Daniel Jordan wrote:
> On Tue, Feb 12, 2019 at 06:14:05PM +0300, Kirill Tkhai wrote:
>> We know, which LRU is not active.
> 
> s/,//
> 
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
>> ---
>>  mm/vmscan.c |   10 ++++------
>>  1 file changed, 4 insertions(+), 6 deletions(-)
>>
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 84542004a277..8d7d55e71511 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -2040,12 +2040,6 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
>>  		}
>>  	}
>>  
>> -	if (!is_active_lru(lru)) {
>> -		__count_vm_events(PGDEACTIVATE, nr_moved);
>> -		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
>> -				   nr_moved);
>> -	}
>> -
>>  	return nr_moved;
>>  }
>>  
>> @@ -2137,6 +2131,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
>>  
>>  	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
>>  	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
>> +
>> +	__count_vm_events(PGDEACTIVATE, nr_deactivate);
>> +	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
> 
> Nice, you're using the irq-unsafe one since irqs are already disabled.  I guess
> this was missed in c3cc39118c361.  Do you want to insert a patch before this
> one that converts all instances of this pattern in vmscan.c over?

I had that in my plans, but I'm not sure I want to do it in this patchset.
Maybe as a separate follow-up on top of this series.

> There's a similar oversight in lru_lazyfree_fn with count_memcg_page_event, but
> that'd mean __count_memcg_page_event which is probably overkill.

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 84542004a277..8d7d55e71511 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2040,12 +2040,6 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 		}
 	}
 
-	if (!is_active_lru(lru)) {
-		__count_vm_events(PGDEACTIVATE, nr_moved);
-		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
-				   nr_moved);
-	}
-
 	return nr_moved;
 }
 
@@ -2137,6 +2131,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
 	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
 	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+
+	__count_vm_events(PGDEACTIVATE, nr_deactivate);
+	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
+
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&pgdat->lru_lock);