
[2/3] mm/migrate: clean up some minor coding style

Message ID 20200107211208.24595-3-rcampbell@nvidia.com (mailing list archive)
State New, archived
Series mm/migrate: add missing check for stable

Commit Message

Ralph Campbell Jan. 7, 2020, 9:12 p.m. UTC
Fix some comment typos and clean up some coding style in preparation for the
next patch. No functional changes.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
 mm/migrate.c | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

Comments

Chris Down Jan. 7, 2020, 9:33 p.m. UTC | #1
Ralph Campbell writes:
>Fix some comment typos and clean up some coding style in preparation for
>the next patch. No functional changes.
>
>Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>

Looks fine.

Acked-by: Chris Down <chris@chrisdown.name>
Christoph Hellwig Jan. 8, 2020, 7:11 a.m. UTC | #2
On Tue, Jan 07, 2020 at 01:12:07PM -0800, Ralph Campbell wrote:
> Fix some comment typos and clean up some coding style in preparation for
> the next patch. No functional changes.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch
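
The main change in the diff below is that migrate_vma_insert_page() folds its
three duplicated error-exit sequences (pte_unmap_unlock() followed by
mem_cgroup_cancel_charge()) behind a single unlock_abort label that falls
through to the existing abort label. For readers less familiar with this
goto-unwind idiom, here is a minimal standalone sketch of the same
before/after shape. It is not kernel code: lock_resource(), unlock_resource()
and cancel_charge() are hypothetical stand-ins for the real calls.

/*
 * Standalone illustration only -- written to compile on its own, not taken
 * from the kernel. The helpers below are hypothetical stand-ins for
 * pte_unmap_unlock() and mem_cgroup_cancel_charge().
 */
#include <stdbool.h>
#include <stdio.h>

static void lock_resource(void)   { puts("lock"); }
static void unlock_resource(void) { puts("unlock"); }
static void cancel_charge(void)   { puts("cancel charge"); }

/* Before: every early-exit path repeats the same unwind statements. */
static bool insert_page_before(bool pte_busy, bool uffd_missing)
{
        lock_resource();
        if (pte_busy) {
                unlock_resource();
                cancel_charge();
                goto abort;
        }
        if (uffd_missing) {
                unlock_resource();
                cancel_charge();
                goto abort;
        }
        unlock_resource();
        return true;            /* success keeps the charge */
abort:
        return false;
}

/* After: the shared unwind code appears once, behind an unlock_abort label. */
static bool insert_page_after(bool pte_busy, bool uffd_missing)
{
        lock_resource();
        if (pte_busy)
                goto unlock_abort;
        if (uffd_missing)
                goto unlock_abort;
        unlock_resource();
        return true;

unlock_abort:
        unlock_resource();
        cancel_charge();
        return false;           /* the real code falls through to its abort: label here */
}

int main(void)
{
        insert_page_before(true, false);
        insert_page_after(true, false);
        return 0;
}

Keeping the unwind in one place is also what makes the "No functional changes"
claim easy to check against the hunks that follow.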

diff --git a/mm/migrate.c b/mm/migrate.c
index b7f5d9ada429..4b1a6d69afb5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -986,7 +986,7 @@  static int move_to_new_page(struct page *newpage, struct page *page,
 		}
 
 		/*
-		 * Anonymous and movable page->mapping will be cleard by
+		 * Anonymous and movable page->mapping will be cleared by
 		 * free_pages_prepare so don't reset it here for keeping
 		 * the type to work PageAnon, for example.
 		 */
@@ -1199,8 +1199,7 @@  static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		/*
 		 * A page that has been migrated has all references
 		 * removed and will be freed. A page that has not been
-		 * migrated will have kepts its references and be
-		 * restored.
+		 * migrated will have kept its references and be restored.
 		 */
 		list_del(&page->lru);
 
@@ -2759,27 +2758,18 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	if (pte_present(*ptep)) {
 		unsigned long pfn = pte_pfn(*ptep);
 
-		if (!is_zero_pfn(pfn)) {
-			pte_unmap_unlock(ptep, ptl);
-			mem_cgroup_cancel_charge(page, memcg, false);
-			goto abort;
-		}
+		if (!is_zero_pfn(pfn))
+			goto unlock_abort;
 		flush = true;
-	} else if (!pte_none(*ptep)) {
-		pte_unmap_unlock(ptep, ptl);
-		mem_cgroup_cancel_charge(page, memcg, false);
-		goto abort;
-	}
+	} else if (!pte_none(*ptep))
+		goto unlock_abort;
 
 	/*
-	 * Check for usefaultfd but do not deliver the fault. Instead,
+	 * Check for userfaultfd but do not deliver the fault. Instead,
 	 * just back off.
 	 */
-	if (userfaultfd_missing(vma)) {
-		pte_unmap_unlock(ptep, ptl);
-		mem_cgroup_cancel_charge(page, memcg, false);
-		goto abort;
-	}
+	if (userfaultfd_missing(vma))
+		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, addr, false);
@@ -2803,6 +2793,9 @@  static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	*src = MIGRATE_PFN_MIGRATE;
 	return;
 
+unlock_abort:
+	pte_unmap_unlock(ptep, ptl);
+	mem_cgroup_cancel_charge(page, memcg, false);
 abort:
 	*src &= ~MIGRATE_PFN_MIGRATE;
 }
@@ -2835,9 +2828,8 @@  void migrate_vma_pages(struct migrate_vma *migrate)
 		}
 
 		if (!page) {
-			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
+			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 				continue;
-			}
 			if (!notified) {
 				notified = true;