
[4/4] mm: Generalize putback scan functions

Message ID 154998445694.18704.16751838197928455484.stgit@localhost.localdomain (mailing list archive)
State New, archived
Series mm: Generalize putback functions

Commit Message

Kirill Tkhai Feb. 12, 2019, 3:14 p.m. UTC
This combines the two similar functions move_active_pages_to_lru()
and putback_inactive_pages() into a single move_pages_to_lru().
This removes duplicate code and makes the object file smaller.

Before:
   text	   data	    bss	    dec	    hex	filename
  57082	   4732	    128	  61942	   f1f6	mm/vmscan.o
After:
   text	   data	    bss	    dec	    hex	filename
  55112	   4600	    128	  59840	   e9c0	mm/vmscan.o

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/vmscan.c |  124 ++++++++++++++++++++---------------------------------------
 1 file changed, 41 insertions(+), 83 deletions(-)
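
For context: move_pages_to_lru() can drop the explicit lru argument that
move_active_pages_to_lru() took because it derives the target list per page
from the page's own flags via page_lru(). Roughly, in kernels of this era
(quoted from include/linux/mm_inline.h for illustration; not part of the
patch):

static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		/* LRU_INACTIVE_ANON or LRU_INACTIVE_FILE */
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}

Since shrink_active_list() clears PG_active only on the pages it moves to
l_inactive, page_lru() resolves to the same lists that the old explicit
lru / lru - LRU_ACTIVE arguments named.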

Comments

Daniel Jordan Feb. 13, 2019, 7:20 p.m. UTC | #1
On Tue, Feb 12, 2019 at 06:14:16PM +0300, Kirill Tkhai wrote:
> +static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> +						     struct list_head *list)
>  {
>  	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
> +	int nr_pages, nr_moved = 0;
>  	LIST_HEAD(pages_to_free);
> +	struct page *page;
> +	enum lru_list lru;
>  
> -	/*
> -	 * Put back any unfreeable pages.
> -	 */
> -	while (!list_empty(page_list)) {
> -		struct page *page = lru_to_page(page_list);
> -		int lru;
> -
> -		VM_BUG_ON_PAGE(PageLRU(page), page);
> -		list_del(&page->lru);
> +	while (!list_empty(list)) {
> +		page = lru_to_page(list);
>  		if (unlikely(!page_evictable(page))) {
> +			list_del_init(&page->lru);
>  			spin_unlock_irq(&pgdat->lru_lock);
>  			putback_lru_page(page);
>  			spin_lock_irq(&pgdat->lru_lock);
>  			continue;
>  		}
> -
>  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
>  
> +		VM_BUG_ON_PAGE(PageLRU(page), page);

Nit, but moving the BUG down here weakens it a little bit since we miss
checking it if the page is unevictable.


Maybe worth pointing out in the changelog that the main difference from
combining these two functions is that we're now checking for !page_evictable
coming from shrink_active_list, which shouldn't change any behavior since that
path works with evictable pages only.
Kirill Tkhai Feb. 14, 2019, 10:29 a.m. UTC | #2
On 13.02.2019 22:20, Daniel Jordan wrote:
> On Tue, Feb 12, 2019 at 06:14:16PM +0300, Kirill Tkhai wrote:
>> +static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>> +						     struct list_head *list)
>>  {
>>  	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>> +	int nr_pages, nr_moved = 0;
>>  	LIST_HEAD(pages_to_free);
>> +	struct page *page;
>> +	enum lru_list lru;
>>  
>> -	/*
>> -	 * Put back any unfreeable pages.
>> -	 */
>> -	while (!list_empty(page_list)) {
>> -		struct page *page = lru_to_page(page_list);
>> -		int lru;
>> -
>> -		VM_BUG_ON_PAGE(PageLRU(page), page);
>> -		list_del(&page->lru);
>> +	while (!list_empty(list)) {
>> +		page = lru_to_page(list);
>>  		if (unlikely(!page_evictable(page))) {
>> +			list_del_init(&page->lru);
>>  			spin_unlock_irq(&pgdat->lru_lock);
>>  			putback_lru_page(page);
>>  			spin_lock_irq(&pgdat->lru_lock);
>>  			continue;
>>  		}
>> -
>>  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
>>  
>> +		VM_BUG_ON_PAGE(PageLRU(page), page);
> 
> Nit, but moving the BUG down here weakens it a little bit since we miss
> checking it if the page is unevictable.

Yeah, this is bad.
 
> Maybe worth pointing out in the changelog that the main difference from
> combining these two functions is that we're now checking for !page_evictable
> coming from shrink_active_list, which shouldn't change any behavior since that
> path works with evictable pages only.

Sounds good.
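
As agreed above, keeping the VM_BUG_ON_PAGE() ahead of the page_evictable()
test preserves its original coverage. A sketch of that ordering (the hunk in
the posted patch below still has the check after the evictable test):

	while (!list_empty(list)) {
		page = lru_to_page(list);
		/* Checked for every page on @list, evictable or not */
		VM_BUG_ON_PAGE(PageLRU(page), page);
		if (unlikely(!page_evictable(page))) {
			list_del_init(&page->lru);
			spin_unlock_irq(&pgdat->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&pgdat->lru_lock);
			continue;
		}
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		SetPageLRU(page);
		...
	}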

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 88fa71e4c28f..66e70cf1dd94 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1807,33 +1807,53 @@  static int too_many_isolated(struct pglist_data *pgdat, int file,
 	return isolated > inactive;
 }
 
-static noinline_for_stack void
-putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
+/*
+ * This moves pages from @list to corresponding LRU list.
+ *
+ * We move them the other way if the page is referenced by one or more
+ * processes, from rmap.
+ *
+ * If the pages are mostly unmapped, the processing is fast and it is
+ * appropriate to hold zone_lru_lock across the whole operation.  But if
+ * the pages are mapped, the processing is slow (page_referenced()) so we
+ * should drop zone_lru_lock around each page.  It's impossible to balance
+ * this, so instead we remove the pages from the LRU while processing them.
+ * It is safe to rely on PG_active against the non-LRU pages in here because
+ * nobody will play with that bit on a non-LRU page.
+ *
+ * The downside is that we have to touch page->_refcount against each page.
+ * But we had to alter page->flags anyway.
+ *
+ * Returns the number of pages moved to the given lruvec.
+ */
+
+static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
+						     struct list_head *list)
 {
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+	int nr_pages, nr_moved = 0;
 	LIST_HEAD(pages_to_free);
+	struct page *page;
+	enum lru_list lru;
 
-	/*
-	 * Put back any unfreeable pages.
-	 */
-	while (!list_empty(page_list)) {
-		struct page *page = lru_to_page(page_list);
-		int lru;
-
-		VM_BUG_ON_PAGE(PageLRU(page), page);
-		list_del(&page->lru);
+	while (!list_empty(list)) {
+		page = lru_to_page(list);
 		if (unlikely(!page_evictable(page))) {
+			list_del_init(&page->lru);
 			spin_unlock_irq(&pgdat->lru_lock);
 			putback_lru_page(page);
 			spin_lock_irq(&pgdat->lru_lock);
 			continue;
 		}
-
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(page, lruvec, lru);
+
+		nr_pages = hpage_nr_pages(page);
+		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
+		list_move(&page->lru, &lruvec->lists[lru]);
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
@@ -1847,13 +1867,17 @@  putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 				spin_lock_irq(&pgdat->lru_lock);
 			} else
 				list_add(&page->lru, &pages_to_free);
+		} else {
+			nr_moved += nr_pages;
 		}
 	}
 
 	/*
 	 * To save our caller's stack, now use input list for pages to free.
 	 */
-	list_splice(&pages_to_free, page_list);
+	list_splice(&pages_to_free, list);
+
+	return nr_moved;
 }
 
 /*
@@ -1945,7 +1969,7 @@  shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
 	reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
 
-	putback_inactive_pages(lruvec, &page_list);
+	move_pages_to_lru(lruvec, &page_list);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 
@@ -1982,72 +2006,6 @@  shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	return nr_reclaimed;
 }
 
-/*
- * This moves pages from the active list to the inactive list.
- *
- * We move them the other way if the page is referenced by one or more
- * processes, from rmap.
- *
- * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation.  But if
- * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page.  It's impossible to balance
- * this, so instead we remove the pages from the LRU while processing them.
- * It is safe to rely on PG_active against the non-LRU pages in here because
- * nobody will play with that bit on a non-LRU page.
- *
- * The downside is that we have to touch page->_refcount against each page.
- * But we had to alter page->flags anyway.
- *
- * Returns the number of pages moved to the given lru.
- */
-
-static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
-				     struct list_head *list,
-				     enum lru_list lru)
-{
-	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-	LIST_HEAD(pages_to_free);
-	struct page *page;
-	int nr_pages;
-	int nr_moved = 0;
-
-	while (!list_empty(list)) {
-		page = lru_to_page(list);
-		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-
-		VM_BUG_ON_PAGE(PageLRU(page), page);
-		SetPageLRU(page);
-
-		nr_pages = hpage_nr_pages(page);
-		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-		list_move(&page->lru, &lruvec->lists[lru]);
-
-		if (put_page_testzero(page)) {
-			__ClearPageLRU(page);
-			__ClearPageActive(page);
-			del_page_from_lru_list(page, lruvec, lru);
-
-			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&pgdat->lru_lock);
-				mem_cgroup_uncharge(page);
-				(*get_compound_page_dtor(page))(page);
-				spin_lock_irq(&pgdat->lru_lock);
-			} else
-				list_add(&page->lru, &pages_to_free);
-		} else {
-			nr_moved += nr_pages;
-		}
-	}
-
-	/*
-	 * To save our caller's stack, now use input list for pages to free.
-	 */
-	list_splice(&pages_to_free, list);
-
-	return nr_moved;
-}
-
 static void shrink_active_list(unsigned long nr_to_scan,
 			       struct lruvec *lruvec,
 			       struct scan_control *sc,
@@ -2134,8 +2092,8 @@  static void shrink_active_list(unsigned long nr_to_scan,
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	nr_activate = move_active_pages_to_lru(lruvec, &l_active, lru);
-	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, lru - LRU_ACTIVE);
+	nr_activate = move_pages_to_lru(lruvec, &l_active);
+	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
 	/* Keep all free pages are in l_active list */
 	list_splice(&l_inactive, &l_active);