[1/6] mm: migrate_device: convert to migrate_device_coherent_folio()

Message ID: 20240826065814.1336616-2-wangkefeng.wang@huawei.com
State: New
Series: mm: finish isolate/putback_lru_page()

Commit Message

Kefeng Wang Aug. 26, 2024, 6:58 a.m. UTC
Save a few calls to compound_head() and use folios throughout.
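
The saving comes from the folio resolution that the page-based helpers
perform internally on every call; a rough sketch, abbreviated from
include/linux/pagemap.h:

static inline void lock_page(struct page *page)
{
	/* page_folio() boils down to a compound_head() lookup */
	struct folio *folio = page_folio(page);

	if (!folio_trylock(folio))
		__folio_lock(folio);
}

Holding a struct folio * and calling folio_lock()/folio_pfn() directly
skips that lookup at each call site.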

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/gup.c            |  2 +-
 mm/internal.h       |  2 +-
 mm/migrate_device.c | 30 +++++++++++++++---------------
 3 files changed, 17 insertions(+), 17 deletions(-)

Comments

David Hildenbrand Aug. 26, 2024, 3:04 p.m. UTC | #1
On 26.08.24 08:58, Kefeng Wang wrote:
> Save a few calls to compound_head() and use folios throughout.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---

Acked-by: David Hildenbrand <david@redhat.com>
Vishal Moola Aug. 26, 2024, 5:52 p.m. UTC | #2
On Mon, Aug 26, 2024 at 02:58:09PM +0800, Kefeng Wang wrote:
> Save a few calls to compound_head() and use folios throughout.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Alistair Popple Aug. 27, 2024, 5:42 a.m. UTC | #3
Kefeng Wang <wangkefeng.wang@huawei.com> writes:

> -	WARN_ON_ONCE(PageCompound(page));
> +	WARN_ON_ONCE(folio_test_large(folio));

Note this isn't _quite_ the same as checking PageCompound(), which
also checks page->compound_head & 1, but I'm guessing that should
never happen, and certainly not for ZONE_DEVICE pages, which can't
(yet) be compound anyway, so:

Reviewed-by: Alistair Popple <apopple@nvidia.com>
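
For reference, the two checks differ roughly as follows (simplified
from include/linux/page-flags.h):

static __always_inline int PageCompound(const struct page *page)
{
	/* true for a head page *or* a tail page (bit 0 of compound_head) */
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

static inline bool folio_test_large(const struct folio *folio)
{
	/* a folio never points at a tail page, so only the head flag matters */
	return folio_test_head(folio);
}

The tail-page branch is exactly the part folio_test_large() drops, and
it can never fire here, since a folio by construction refers to a head
(or order-0) page.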

>  
> -	lock_page(page);
> -	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
> +	folio_lock(folio);
> +	src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
>  
>  	/*
>  	 * We don't have a VMA and don't need to walk the page tables to find
> -	 * the source page. So call migrate_vma_unmap() directly to unmap the
> -	 * page as migrate_vma_setup() will fail if args.vma == NULL.
> +	 * the source folio. So call migrate_vma_unmap() directly to unmap the
> +	 * folio as migrate_vma_setup() will fail if args.vma == NULL.
>  	 */
>  	migrate_device_unmap(&src_pfn, 1, NULL);
>  	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
>  		return -EBUSY;
>  
> -	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
> -	if (dpage) {
> -		lock_page(dpage);
> -		dst_pfn = migrate_pfn(page_to_pfn(dpage));
> +	dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
> +	if (dfolio) {
> +		folio_lock(dfolio);
> +		dst_pfn = migrate_pfn(folio_pfn(dfolio));
>  	}
>  
>  	migrate_device_pages(&src_pfn, &dst_pfn, 1);
>  	if (src_pfn & MIGRATE_PFN_MIGRATE)
> -		copy_highpage(dpage, page);
> +		folio_copy(dfolio, folio);
>  	migrate_device_finalize(&src_pfn, &dst_pfn, 1);
>  
>  	if (src_pfn & MIGRATE_PFN_MIGRATE)
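
Regarding the copy_highpage() to folio_copy() change quoted above:
folio_copy() is essentially a per-page loop over copy_highpage(),
abbreviated here from mm/util.c:

void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

For the order-0 folios this path handles (enforced by the
WARN_ON_ONCE(folio_test_large()) above), the loop degenerates to the
single copy_highpage() it replaces, so the conversion is
behavior-preserving.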

Patch

diff --git a/mm/gup.c b/mm/gup.c
index d19884e097fd..5defd5e6d8f8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2335,7 +2335,7 @@  static int migrate_longterm_unpinnable_folios(
 			folio_get(folio);
 			gup_put_folio(folio, 1, FOLL_PIN);
 
-			if (migrate_device_coherent_page(&folio->page)) {
+			if (migrate_device_coherent_folio(folio)) {
 				ret = -EBUSY;
 				goto err;
 			}
diff --git a/mm/internal.h b/mm/internal.h
index e1e139e412d1..0f4750f77f59 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1209,7 +1209,7 @@  int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
 		      int *last_cpupid);
 
 void free_zone_device_folio(struct folio *folio);
-int migrate_device_coherent_page(struct page *page);
+int migrate_device_coherent_folio(struct folio *folio);
 
 /*
  * mm/gup.c
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6d66dc1c6ffa..82d75205dda8 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -708,7 +708,7 @@  static void __migrate_device_pages(unsigned long *src_pfns,
 
 			/*
 			 * The only time there is no vma is when called from
-			 * migrate_device_coherent_page(). However this isn't
+			 * migrate_device_coherent_folio(). However this isn't
 			 * called if the page could not be unmapped.
 			 */
 			VM_BUG_ON(!migrate);
@@ -921,38 +921,38 @@  int migrate_device_range(unsigned long *src_pfns, unsigned long start,
 EXPORT_SYMBOL(migrate_device_range);
 
 /*
- * Migrate a device coherent page back to normal memory. The caller should have
- * a reference on page which will be copied to the new page if migration is
+ * Migrate a device coherent folio back to normal memory. The caller should have
+ * a reference on folio which will be copied to the new folio if migration is
  * successful or dropped on failure.
  */
-int migrate_device_coherent_page(struct page *page)
+int migrate_device_coherent_folio(struct folio *folio)
 {
 	unsigned long src_pfn, dst_pfn = 0;
-	struct page *dpage;
+	struct folio *dfolio;
 
-	WARN_ON_ONCE(PageCompound(page));
+	WARN_ON_ONCE(folio_test_large(folio));
 
-	lock_page(page);
-	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+	folio_lock(folio);
+	src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
 
 	/*
 	 * We don't have a VMA and don't need to walk the page tables to find
-	 * the source page. So call migrate_vma_unmap() directly to unmap the
-	 * page as migrate_vma_setup() will fail if args.vma == NULL.
+	 * the source folio. So call migrate_vma_unmap() directly to unmap the
+	 * folio as migrate_vma_setup() will fail if args.vma == NULL.
 	 */
 	migrate_device_unmap(&src_pfn, 1, NULL);
 	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
 		return -EBUSY;
 
-	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
-	if (dpage) {
-		lock_page(dpage);
-		dst_pfn = migrate_pfn(page_to_pfn(dpage));
+	dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
+	if (dfolio) {
+		folio_lock(dfolio);
+		dst_pfn = migrate_pfn(folio_pfn(dfolio));
 	}
 
 	migrate_device_pages(&src_pfn, &dst_pfn, 1);
 	if (src_pfn & MIGRATE_PFN_MIGRATE)
-		copy_highpage(dpage, page);
+		folio_copy(dfolio, folio);
 	migrate_device_finalize(&src_pfn, &dst_pfn, 1);
 
 	if (src_pfn & MIGRATE_PFN_MIGRATE)