@@ -153,6 +153,15 @@ attribute, e. g.::
Setting this parameter to 100 will disable the hysteresis.
+Many users cannot tolerate the swapping that comes with zswap store failures,
+due to the IO incurred if these pages are needed later on. In this scenario,
+users can bypass swapping when zswap store attempts fail (and keep the pages
+in memory) as follows::
+
+ echo Y > /sys/module/zswap/parameters/bypass_swap_when_store_fail_enabled
+
+Note that swap IO due to zswap writeback is not disabled with this option.
+
When there is a sizable amount of cold memory residing in the zswap pool, it
can be advantageous to proactively write these cold pages to swap and reclaim
the memory for other use cases. By default, the zswap shrinker is disabled.
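
(As a quick sanity check, the current value can be read back through the same
sysfs file; the parameter defaults to N, i.e. disabled:)

  $ cat /sys/module/zswap/parameters/bypass_swap_when_store_fail_enabled
  N
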
@@ -7,6 +7,7 @@
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
+extern bool zswap_bypass_swap_when_store_fail_enabled;
#ifdef CONFIG_ZSWAP
@@ -18,6 +19,10 @@ void zswap_swapoff(int type);
bool zswap_remove_swpentry_from_lru(swp_entry_t swpentry);
void zswap_insert_swpentry_into_lru(swp_entry_t swpentry);
+static inline bool zswap_bypass_swap_when_store_fail(void)
+{
+ return zswap_bypass_swap_when_store_fail_enabled;
+}
#else
static inline bool zswap_store(struct folio *folio)
@@ -41,6 +46,10 @@ static inline bool zswap_remove_swpentry_from_lru(swp_entry_t swpentry)
static inline void zswap_insert_swpentry_into_lru(swp_entry_t swpentry) {}
+static inline bool zswap_bypass_swap_when_store_fail(void)
+{
+ return false;
+}
#endif
#endif /* _LINUX_ZSWAP_H */
@@ -201,6 +201,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_end_writeback(folio);
return 0;
}
+
+ if (zswap_bypass_swap_when_store_fail()) {
+ folio_mark_dirty(folio);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+
__swap_writepage(&folio->page, wbc);
return 0;
}
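
For context on the new return value: swap_writepage() is the writepage
callback that reclaim invokes for swap-backed folios, and vmscan treats
AOP_WRITEPAGE_ACTIVATE as "keep this folio in memory". A rough sketch of how
mm/vmscan.c:pageout() handles it (illustrative, not a verbatim excerpt):

	/* pageout() in mm/vmscan.c, sketch */
	res = mapping->a_ops->writepage(&folio->page, &wbc);
	if (res == AOP_WRITEPAGE_ACTIVATE) {
		folio_clear_reclaim(folio);
		return PAGE_ACTIVATE;	/* rotate the folio back to the active list */
	}

This is also why folio_mark_dirty() is needed above: the folio survives this
reclaim pass and must not be treated as clean.
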
@@ -1514,8 +1514,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
- swap_writepage(&folio->page, wbc);
- return 0;
+ /*
+ * Seeing AOP_WRITEPAGE_ACTIVATE here indicates swapping is disabled on
+ * zswap store failure. Note that in that case the folio is already
+ * re-marked dirty by swap_writepage().
+ */
+ return swap_writepage(&folio->page, wbc);
}
mutex_unlock(&shmem_swaplist_mutex);
@@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
bool, 0644);
+bool zswap_bypass_swap_when_store_fail_enabled;
+module_param_named(bypass_swap_when_store_fail_enabled,
+ zswap_bypass_swap_when_store_fail_enabled, bool, 0644);
+
static bool zswap_exclusive_loads_enabled = IS_ENABLED(
CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
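
Since zswap is normally built into the kernel, the new parameter should also
be settable at boot like the existing zswap parameters (assuming the usual
built-in module-parameter handling):

	zswap.bypass_swap_when_store_fail_enabled=Y

The 0644 mode additionally allows flipping it at runtime via sysfs, as shown
in the documentation hunk above.
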
During our experiments with zswap, we sometimes observe swap IOs even though
the zswap pool limit is never hit. This is due to occasional zswap store
failures, in which case the page is written straight to the swap device. This
prevents many users who cannot tolerate swapping from adopting zswap to save
memory where possible.

This patch adds the option to bypass swap when a zswap store fails. The
feature is disabled by default (to preserve the existing behavior) and can be
enabled via a new zswap module parameter. When enabled, swapping is all but
prevented (except when the zswap pool is full and pages have to be written
back to swap).

Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 Documentation/admin-guide/mm/zswap.rst | 9 +++++++++
 include/linux/zswap.h                  | 9 +++++++++
 mm/page_io.c                           | 6 ++++++
 mm/shmem.c                             | 8 ++++++--
 mm/zswap.c                             | 4 ++++
 5 files changed, 34 insertions(+), 2 deletions(-)
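
For anyone wanting to reproduce the behavior described above, a rough test
recipe (a sketch only; it assumes an active swap device, zswap enabled, and
debugfs mounted, and uses only existing zswap knobs and counters):

	# make zswap stores start failing by shrinking the pool
	echo 1 > /sys/module/zswap/parameters/max_pool_percent

	# baseline: with the bypass off, rejected pages go to the swap device
	echo N > /sys/module/zswap/parameters/bypass_swap_when_store_fail_enabled
	grep pswpout /proc/vmstat        # advances under memory pressure

	# with the bypass on, rejected pages stay in memory
	echo Y > /sys/module/zswap/parameters/bypass_swap_when_store_fail_enabled
	grep pswpout /proc/vmstat        # should largely stop advancing; residual
	                                 # IO from zswap writeback is expected
	grep . /sys/kernel/debug/zswap/reject_*   # confirms stores are failing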