@@ -11,8 +11,14 @@
#include <linux/types.h>
-/* 31 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 31
+/*
+ * With vm.page-cluster = 5, swapin_readahead() can read in up to
+ * 1 << 5 = 32 folios, so the previous 31 pointers are insufficient
+ * for the folio_batch usage in the swap read decompress batching
+ * interface built on swapin_readahead(). Size PAGEVEC_SIZE accordingly.
+ */
+#define SWAP_RA_ORDER_CEILING 5
+#define PAGEVEC_SIZE (1UL << SWAP_RA_ORDER_CEILING)
struct folio;
@@ -74,7 +80,9 @@ static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
static inline unsigned folio_batch_add(struct folio_batch *fbatch,
struct folio *folio)
{
- fbatch->folios[fbatch->nr++] = folio;
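+	/* Add only if there is room; a full batch drops the folio */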
+ if (folio_batch_space(fbatch) > 0)
+ fbatch->folios[fbatch->nr++] = folio;
return folio_batch_space(fbatch);
}
@@ -44,8 +44,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
-#define SWAP_RA_ORDER_CEILING 5
-
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
Make the following changes to struct folio_batch for use in the
swapin_readahead()-based zswap load batching interface for parallel
decompressions with IAA:

1) Move the SWAP_RA_ORDER_CEILING definition to pagevec.h.

2) Increase PAGEVEC_SIZE to (1UL << SWAP_RA_ORDER_CEILING), because
   vm.page-cluster=5 requires capacity for 32 folios, one more than
   the previous 31.

3) Make folio_batch_add() fail-safe: a folio is added only if the
   batch has space left, so a full batch can no longer be overflowed.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
 include/linux/pagevec.h | 14 +++++++++++---
 mm/swap_state.c         |  2 --
 2 files changed, 11 insertions(+), 5 deletions(-)
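
For illustration, a minimal caller sketch of the fail-safe semantics
follows; swap_batch_collect() and the drain step are hypothetical and
not part of this patch. It assumes the caller drains a full batch as
soon as folio_batch_add() reports no space left:

#include <linux/pagevec.h>

/*
 * Hypothetical sketch: gather folios for batched decompression.
 * With the fail-safe folio_batch_add(), a return of 0 means there
 * is no space left, so the caller drains the batch immediately.
 */
static void swap_batch_collect(struct folio_batch *fbatch,
			       struct folio *folio)
{
	if (!folio_batch_add(fbatch, folio)) {
		/*
		 * Batch is full: submit it for parallel decompression
		 * (the submit path is assumed to own the folio refs),
		 * then reset the count for the next batch.
		 */
		folio_batch_reinit(fbatch);
	}
}

Draining on a zero return keeps the batch from ever being full on
entry, so the guard in folio_batch_add() never has to drop a folio.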