@@ -4042,6 +4042,15 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
return folio;
}
+#define BATCH_SWPIN_ORDER 2
+#define BATCH_SWPIN_COUNT (1 << BATCH_SWPIN_ORDER)
+#define BATCH_SWPIN_SIZE (PAGE_SIZE << BATCH_SWPIN_ORDER)
+
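+/*
+ * Per-CPU order-2 bounce buffer for batched swap-in: the batched swap
+ * read targets @folio, and its contents are then copied out to four
+ * small folios. Access is serialized by @mutex.
+ */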
+struct batch_swpin_buffer {
+ struct folio *folio;
+ struct mutex mutex;
+};
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
@@ -4120,7 +4129,101 @@ static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
return orders;
}
-static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+static DEFINE_PER_CPU(struct batch_swpin_buffer, swp_buf);
+
+static int __init batch_swpin_buffer_init(void)
+{
+ int ret, cpu;
+ struct batch_swpin_buffer *buf;
+ struct page *page;
+
+ for_each_possible_cpu(cpu) {
+ buf = per_cpu_ptr(&swp_buf, cpu);
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_COMP, BATCH_SWPIN_ORDER);
+ if (!page) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ buf->folio = page_folio(page);
+ mutex_init(&buf->mutex);
+ }
+ return 0;
+
+err:
+ for_each_possible_cpu(cpu) {
+ buf = per_cpu_ptr(&swp_buf, cpu);
+ if (buf->folio) {
+ folio_put(buf->folio);
+ buf->folio = NULL;
+ }
+ }
+ return ret;
+}
+core_initcall(batch_swpin_buffer_init);
+
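+/*
+ * Allocate and charge four small folios covering the BATCH_SWPIN_SIZE
+ * aligned region around the faulting address, then take the per-CPU
+ * buffer (returning with its mutex held) for the swap read to target.
+ * Returns the buffer folio, or NULL if the buffer is unavailable or an
+ * allocation/charge fails.
+ */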
+static struct folio *alloc_batched_swap_folios(struct vm_fault *vmf,
+ struct batch_swpin_buffer **buf, struct folio **folios,
+ swp_entry_t entry)
+{
+ unsigned long haddr = ALIGN_DOWN(vmf->address, BATCH_SWPIN_SIZE);
+ struct batch_swpin_buffer *sbuf = raw_cpu_ptr(&swp_buf);
+ struct folio *folio = sbuf->folio;
+ unsigned long addr;
+ int i;
+
+ if (unlikely(!folio))
+ return NULL;
+
+ for (i = 0; i < BATCH_SWPIN_COUNT; i++) {
+ addr = haddr + i * PAGE_SIZE;
+ folios[i] = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vmf->vma, addr);
+ if (!folios[i])
+ goto err;
+ if (mem_cgroup_swapin_charge_folio(folios[i], vmf->vma->vm_mm,
+ GFP_KERNEL, entry)) {
+ /* Drop the folio that failed to charge before unwinding */
+ folio_put(folios[i]);
+ goto err;
+ }
+ }
+
+ mutex_lock(&sbuf->mutex);
+ *buf = sbuf;
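+ /*
+ * Let the buffer temporarily carry the memcg of the folios it is
+ * being read for, so folio_memcg() users in the swap read path see
+ * the owning cgroup.
+ */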
+#ifdef CONFIG_MEMCG
+ folio->memcg_data = (*folios)->memcg_data;
+#endif
+ return folio;
+
+err:
+ for (i--; i >= 0; i--)
+ folio_put(folios[i]);
+ return NULL;
+}
+
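+/*
+ * Copy the data from the per-CPU buffer into the four small folios and
+ * set up their state (locked, swapbacked, refault accounting, LRU,
+ * uptodate), then reset the buffer and release its mutex.
+ */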
+static void fill_batched_swap_folios(struct vm_fault *vmf,
+ void *shadow, struct batch_swpin_buffer *buf,
+ struct folio *folio, struct folio **folios)
+{
+ unsigned long haddr = ALIGN_DOWN(vmf->address, BATCH_SWPIN_SIZE);
+ unsigned long addr;
+ int i;
+
+ for (i = 0; i < BATCH_SWPIN_COUNT; i++) {
+ addr = haddr + i * PAGE_SIZE;
+ __folio_set_locked(folios[i]);
+ __folio_set_swapbacked(folios[i]);
+ if (shadow)
+ workingset_refault(folios[i], shadow);
+ folio_add_lru(folios[i]);
+ copy_user_highpage(&folios[i]->page, folio_page(folio, i),
+ addr, vmf->vma);
+ if (folio_test_uptodate(folio))
+ folio_mark_uptodate(folios[i]);
+ }
+
+ /* Reset the borrowed flags and memcg so the buffer can be reused */
+ folio->flags &= ~(PAGE_FLAGS_CHECK_AT_PREP & ~(1UL << PG_head));
+#ifdef CONFIG_MEMCG
+ folio->memcg_data = 0;
+#endif
+ mutex_unlock(&buf->mutex);
+}
+
+static struct folio *alloc_swap_folio(struct vm_fault *vmf,
+ struct batch_swpin_buffer **buf,
+ struct folio **folios)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long orders;
@@ -4180,6 +4283,9 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
pte_unmap_unlock(pte, ptl);
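+ /* No order with suitable contiguous swap entries: use a single folio */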
+ if (!orders)
+ goto fallback;
+
/* Try allocating the highest of the remaining orders. */
gfp = vma_thp_gfp_mask(vma);
while (orders) {
@@ -4194,14 +4300,29 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
order = next_order(&orders, order);
}
+ /*
+ * During swap-out, a THP may have been compressed into multiple
+ * order-2 blocks to improve CPU usage and the compression ratio.
+ * Try to swap in a batch of four small folios so that they are
+ * decompressed together as a single unit only once; fall back to
+ * a single small folio if the batched allocation fails.
+ */
+ folio = alloc_batched_swap_folios(vmf, buf, folios, entry);
+ if (folio)
+ return folio;
+
fallback:
return __alloc_swap_folio(vmf);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
-static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+static struct folio *alloc_swap_folio(struct vm_fault *vmf,
+ struct batch_swpin_buffer **buf,
+ struct folio **folios)
{
return __alloc_swap_folio(vmf);
}
+static inline void fill_batched_swap_folios(struct vm_fault *vmf,
+ void *shadow, struct batch_swpin_buffer *buf,
+ struct folio *folio, struct folio **folios)
+{
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
@@ -4216,6 +4337,8 @@ static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
*/
vm_fault_t do_swap_page(struct vm_fault *vmf)
{
+ struct folio *folios[BATCH_SWPIN_COUNT] = { NULL };
+ struct batch_swpin_buffer *buf = NULL;
struct vm_area_struct *vma = vmf->vma;
struct folio *swapcache, *folio = NULL;
DECLARE_WAITQUEUE(wait, current);
@@ -4228,7 +4351,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
- int nr_pages;
+ int nr_pages, i;
unsigned long page_idx;
unsigned long address;
pte_t *ptep;
@@ -4296,7 +4419,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
/* skip swapcache */
- folio = alloc_swap_folio(vmf);
+ folio = alloc_swap_folio(vmf, &buf, folios);
if (folio) {
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
@@ -4327,10 +4450,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
shadow = get_shadow_from_swap_cache(entry);
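+ /*
+ * For batched swap-in, refault accounting and LRU addition are
+ * handled per small folio in fill_batched_swap_folios().
+ */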
- if (shadow)
+ if (shadow && !buf)
workingset_refault(folio, shadow);
-
- folio_add_lru(folio);
+ if (!buf)
+ folio_add_lru(folio);
/* To provide entry to swap_read_folio() */
folio->swap = entry;
@@ -4361,6 +4484,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
page = folio_file_page(folio, swp_offset(entry));
+ /*
+ * Copy the data from the buffer folio into the batched
+ * small folios, then skip ahead to map them.
+ */
+ if (buf) {
+ fill_batched_swap_folios(vmf, shadow, buf, folio, folios);
+ folio = folios[0];
+ page = &folios[0]->page;
+ goto do_map;
+ }
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
@@ -4415,6 +4548,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
lru_add_drain();
}
+do_map:
folio_throttle_swaprate(folio, GFP_KERNEL);
/*
@@ -4431,8 +4565,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/* allocated large folios for SWP_SYNCHRONOUS_IO */
- if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
- unsigned long nr = folio_nr_pages(folio);
+ if ((folio_test_large(folio) || buf) && !folio_test_swapcache(folio)) {
+ unsigned long nr = buf ? BATCH_SWPIN_COUNT : folio_nr_pages(folio);
unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
pte_t *folio_ptep = vmf->pte - idx;
@@ -4527,6 +4661,42 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
}
+ /* Batched mapping of allocated small folios for SWP_SYNCHRONOUS_IO */
+ if (buf) {
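+ /*
+ * Restore per-entry arch metadata before freeing the swap
+ * entries (it may be indexed by swap entry), then update
+ * the mm counters.
+ */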
+ for (i = 0; i < nr_pages; i++)
+ arch_swap_restore(swp_entry(swp_type(entry),
+ swp_offset(entry) + i), folios[i]);
+ swap_free_nr(entry, nr_pages);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
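+ /* Fresh folios never exposed to the swapcache: map them exclusive */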
+ rmap_flags |= RMAP_EXCLUSIVE;
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long addr = address + i * PAGE_SIZE;
+
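+ /* Rebuild the pte, carrying soft-dirty and uffd-wp over from the swap pte */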
+ pte = mk_pte(&folios[i]->page, vma->vm_page_prot);
+ if (pte_swp_soft_dirty(vmf->orig_pte))
+ pte = pte_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(vmf->orig_pte))
+ pte = pte_mkuffd_wp(pte);
+ if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
+ !pte_needs_soft_dirty_wp(vma, pte)) {
+ pte = pte_mkwrite(pte, vma);
+ if ((vmf->flags & FAULT_FLAG_WRITE) && (i == page_idx)) {
+ pte = pte_mkdirty(pte);
+ vmf->flags &= ~FAULT_FLAG_WRITE;
+ }
+ }
+ flush_icache_page(vma, &folios[i]->page);
+ folio_add_new_anon_rmap(folios[i], vma, addr, rmap_flags);
+ set_pte_at(vma->vm_mm, addr, ptep + i, pte);
+ arch_do_swap_page_nr(vma->vm_mm, vma, addr, pte, pte, 1);
+ if (i == page_idx)
+ vmf->orig_pte = pte;
+ folio_unlock(folios[i]);
+ }
+ goto wp_page;
+ }
+
/*
* Some architectures may have to restore extra metadata to the page
* when reading from swap. This metadata may be indexed by swap entry
@@ -4612,6 +4782,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_put(swapcache);
}
+wp_page:
if (vmf->flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(vmf);
if (ret & VM_FAULT_ERROR)
@@ -4638,9 +4809,19 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
out_page:
- folio_unlock(folio);
+ if (!buf) {
+ folio_unlock(folio);
+ } else if (folio == folios[0]) {
+ /* The buffer has already been filled and released */
+ for (i = 0; i < BATCH_SWPIN_COUNT; i++)
+ folio_unlock(folios[i]);
+ } else {
+ /*
+ * Failed before fill_batched_swap_folios(): hand the per-CPU
+ * buffer back for reuse; the unused small folios are dropped
+ * below.
+ */
+ folio->flags &= ~(PAGE_FLAGS_CHECK_AT_PREP & ~(1UL << PG_head));
+#ifdef CONFIG_MEMCG
+ folio->memcg_data = 0;
+#endif
+ mutex_unlock(&buf->mutex);
+ }
out_release:
- folio_put(folio);
+ if (!buf) {
+ folio_put(folio);
+ } else {
+ for (i = 0; i < BATCH_SWPIN_COUNT; i++)
+ folio_put(folios[i]);
+ }
if (folio != swapcache && swapcache) {
folio_unlock(swapcache);
folio_put(swapcache);