@@ -691,7 +691,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
- struct page *page = migration_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
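
For context on why ptep_zap_swap_entry() still needs the struct page at all:
the counter to decrement depends on what the migrated page was backing. From
include/linux/mm.h of this era (unchanged by the patch, quoted for reference):

	static inline int mm_counter(struct page *page)
	{
		if (PageAnon(page))
			return MM_ANONPAGES;
		return mm_counter_file(page);
	}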
@@ -514,10 +514,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_migration_entry(swpent))
- page = migration_entry_to_page(swpent);
- else if (is_device_private_entry(swpent))
- page = device_private_entry_to_page(swpent);
+ } else if (is_pfn_swap_entry(swpent))
+ page = pfn_swap_entry_to_page(swpent);
} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
&& pte_none(*pte))) {
page = xa_load(&vma->vm_file->f_mapping->i_pages,
@@ -549,7 +547,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
swp_entry_t entry = pmd_to_swp_entry(*pmd);
if (is_migration_entry(entry))
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
}
if (IS_ERR_OR_NULL(page))
return;
@@ -694,10 +692,8 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
} else if (is_swap_pte(*pte)) {
swp_entry_t swpent = pte_to_swp_entry(*pte);
- if (is_migration_entry(swpent))
- page = migration_entry_to_page(swpent);
- else if (is_device_private_entry(swpent))
- page = device_private_entry_to_page(swpent);
+ if (is_pfn_swap_entry(swpent))
+ page = pfn_swap_entry_to_page(swpent);
}
if (page) {
int mapcount = page_mapcount(page);
@@ -1384,11 +1380,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
- if (is_migration_entry(entry))
- page = migration_entry_to_page(entry);
-
- if (is_device_private_entry(entry))
- page = device_private_entry_to_page(entry);
+ if (is_pfn_swap_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
}
if (page && !PageAnon(page))
@@ -1445,7 +1438,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pmd_swp_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
VM_BUG_ON(!is_pmd_migration_entry(pmd));
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
}
#endif
@@ -545,8 +545,8 @@ static inline void show_swap_cache_info(void)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
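
With CONFIG_SWAP=0 the rewritten macro keeps the sanity check in
zap_pte_range() meaningful: free_swap_and_cache() now reports success exactly
for pfn swap entries, so any other non-present entry still gets flagged. The
call site the new comment refers to, quoted from mm/memory.c (see also the
zap_pte_range hunks further down):

	if (unlikely(!free_swap_and_cache(entry)))
		print_bad_pte(vma, addr, ptent, NULL);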
@@ -121,16 +121,6 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
-
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
-{
- return swp_offset(entry);
-}
-
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
-{
- return pfn_to_page(swp_offset(entry));
-}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
@@ -150,16 +140,6 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
{
return false;
}
-
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
-{
- return 0;
-}
-
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
-{
- return NULL;
-}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
@@ -182,22 +162,6 @@ static inline int is_write_migration_entry(swp_entry_t entry)
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
-{
- return swp_offset(entry);
-}
-
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
-{
- struct page *p = pfn_to_page(swp_offset(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(!PageLocked(compound_head(p)));
- return p;
-}
-
static inline void make_migration_entry_read(swp_entry_t *entry)
{
*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
@@ -217,16 +181,6 @@ static inline int is_migration_entry(swp_entry_t swp)
return 0;
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
-{
- return 0;
-}
-
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
-{
- return NULL;
-}
-
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl) { }
@@ -241,6 +195,29 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+
+ return p;
+}
+
+/*
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+ * in the swap offset. Such entries are used to represent unaddressable device
+ * memory and to restrict access to a page undergoing migration.
+ */
+static inline bool is_pfn_swap_entry(swp_entry_t entry)
+{
+ return is_migration_entry(entry) || is_device_private_entry(entry);
+}
+
+
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
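
As a rough illustration of the new interface, a hypothetical self-test (the
function name is invented and this is a sketch, not part of the patch) could
exercise the round trip; note that both make_migration_entry() and the
BUG_ON() above require the page to be locked:

	static void pfn_swap_entry_selftest(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);
		swp_entry_t entry;

		if (!page)
			return;

		lock_page(page);
		entry = make_migration_entry(page, 0);

		/* The pfn round-trips through the swap offset field. */
		VM_BUG_ON(swp_offset(entry) != page_to_pfn(page));
		VM_BUG_ON(pfn_swap_entry_to_page(entry) != page);

		unlock_page(page);
		__free_page(page);
	}

Both removed helpers reduced to the same decode, pfn_to_page(swp_offset(entry)),
which is what makes merging the migration and device private variants safe.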
@@ -214,7 +214,7 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
swp_entry_t entry)
{
return is_device_private_entry(entry) &&
- device_private_entry_to_page(entry)->pgmap->owner ==
+ pfn_swap_entry_to_page(entry)->pgmap->owner ==
range->dev_private_owner;
}
@@ -257,8 +257,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
cpu_flags = HMM_PFN_VALID;
if (is_write_device_private_entry(entry))
cpu_flags |= HMM_PFN_WRITE;
- *hmm_pfn = device_private_entry_to_pfn(entry) |
- cpu_flags;
+ *hmm_pfn = swp_offset(entry) | cpu_flags;
return 0;
}
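
Open-coding swp_offset() here matches the removed device_private_entry_to_pfn()
because the constructor (unchanged by this patch) stores the pfn directly in
the offset field:

	static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
	{
		return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
				 page_to_pfn(page));
	}

The same reasoning covers the mm/page_vma_mapped.c hunks below, where
migration_entry_to_pfn() and device_private_entry_to_pfn() are likewise
replaced by swp_offset().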
@@ -1694,7 +1694,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
@@ -2102,7 +2102,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
write = is_write_migration_entry(entry);
young = false;
soft_dirty = pmd_swp_soft_dirty(old_pmd);
@@ -5349,7 +5349,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
* as special swap entry in the CPU page table.
*/
if (is_device_private_entry(ent)) {
- page = device_private_entry_to_page(ent);
+ page = pfn_swap_entry_to_page(ent);
/*
* MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
* a refcount of 1 when free (unlike normal page)
@@ -730,7 +730,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
rss[mm_counter(page)]++;
@@ -749,7 +749,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
set_pte_at(src_mm, addr, src_pte, pte);
}
} else if (is_device_private_entry(entry)) {
- page = device_private_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
/*
* Update rss count even for unaddressable pages, as
@@ -1280,7 +1280,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
entry = pte_to_swp_entry(ptent);
if (is_device_private_entry(entry)) {
- struct page *page = device_private_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(entry);
if (unlikely(details && details->check_mapping)) {
/*
@@ -1309,7 +1309,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
else if (is_migration_entry(entry)) {
struct page *page;
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
rss[mm_counter(page)]--;
}
if (unlikely(!free_swap_and_cache(entry)))
@@ -3327,7 +3327,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
} else if (is_device_private_entry(entry)) {
- vmf->page = device_private_entry_to_page(entry);
+ vmf->page = pfn_swap_entry_to_page(entry);
ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
@@ -294,7 +294,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
if (!is_migration_entry(entry))
goto out;
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
/*
* Once page cache replacement of page migration started, page_count
@@ -334,7 +334,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
goto unlock;
- page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
+ page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
if (!get_page_unless_zero(page))
goto unlock;
spin_unlock(ptl);
@@ -2399,7 +2399,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
if (!is_device_private_entry(entry))
goto next;
- page = device_private_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
if (!(migrate->flags &
MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
page->pgmap->owner != migrate->pgmap_owner)
@@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
if (!is_migration_entry(entry))
return false;
- pfn = migration_entry_to_pfn(entry);
+ pfn = swp_offset(entry);
} else if (is_swap_pte(*pvmw->pte)) {
swp_entry_t entry;
@@ -105,7 +105,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
if (!is_device_private_entry(entry))
return false;
- pfn = device_private_entry_to_pfn(entry);
+ pfn = swp_offset(entry);
} else {
if (!pte_present(*pvmw->pte))
return false;
@@ -200,7 +200,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
- if (migration_entry_to_page(entry) != page)
+ if (pfn_swap_entry_to_page(entry) != page)
return not_found(pvmw);
return true;
}