@@ -1268,7 +1268,23 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
zspage = get_zspage(page);
#ifdef CONFIG_ZPOOL
- /* Move the zspage to front of pool's LRU */
+ /*
+ * Move the zspage to front of pool's LRU.
+ *
+ * Note that this is swap-specific, so by definition there are no ongoing
+ * accesses to the memory while the page is swapped out that would make
+ * it "hot". A new entry is hot, then ages to the tail until it gets either
+ * written back or swaps back in.
+ *
+ * Furthermore, map is also called during writeback. We must not put an
+ * isolated page on the LRU mid-reclaim.
+ *
+ * As a result, only update the LRU when the page is mapped for write,
+ * i.e. when it's first instantiated.
+ *
+ * This is a deviation from the other backends, which perform this update
+ * in the allocation function (zbud_alloc, z3fold_alloc).
+ */
if (mm == ZS_MM_WO) {
if (!list_empty(&zspage->lru))
list_del(&zspage->lru);
Add a comment explaining the mapping check for LRU update.

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 mm/zsmalloc.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

--
2.30.2
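For readers following the policy rather than the diff: below is a minimal userspace C sketch (not the kernel code, and not part of this patch) of the rule the new comment describes. An entry is promoted to the LRU head only on its first write mapping; read mappings, including the one taken during writeback, leave the ordering untouched. The names here (struct entry, map_entry, and the MM_RO/MM_WO stand-ins for ZS_MM_RO/ZS_MM_WO) are hypothetical and exist only in this sketch.

/*
 * Userspace sketch of the LRU policy described in the comment above:
 * an entry moves to the LRU head only on its first write mapping, and
 * read mappings (swap-in, writeback) never reorder the list.
 */
#include <stdbool.h>
#include <stdio.h>

struct entry {
	int id;
	struct entry *prev, *next;	/* LRU links, head = most recently stored */
	bool on_lru;
};

static struct entry *lru_head, *lru_tail;

static void lru_del(struct entry *e)
{
	if (!e->on_lru)
		return;
	if (e->prev)
		e->prev->next = e->next;
	else
		lru_head = e->next;
	if (e->next)
		e->next->prev = e->prev;
	else
		lru_tail = e->prev;
	e->prev = e->next = NULL;
	e->on_lru = false;
}

static void lru_add_head(struct entry *e)
{
	e->prev = NULL;
	e->next = lru_head;
	if (lru_head)
		lru_head->prev = e;
	lru_head = e;
	if (!lru_tail)
		lru_tail = e;
	e->on_lru = true;
}

enum map_mode { MM_RO, MM_WO };		/* stand-ins for ZS_MM_RO / ZS_MM_WO */

/* Mirrors the check in zs_map_object(): only write mappings touch the LRU. */
static void map_entry(struct entry *e, enum map_mode mode)
{
	if (mode == MM_WO) {
		lru_del(e);		/* no-op if the entry is not on the LRU yet */
		lru_add_head(e);
	}
	/* Read mappings (swap-in, writeback reads) leave the order alone. */
}

int main(void)
{
	struct entry a = { .id = 1 }, b = { .id = 2 };

	map_entry(&a, MM_WO);	/* first store: a becomes the LRU head */
	map_entry(&b, MM_WO);	/* b is now the most recently stored entry */
	map_entry(&a, MM_RO);	/* read for writeback: ordering unchanged */

	for (struct entry *e = lru_head; e; e = e->next)
		printf("entry %d\n", e->id);	/* prints 2, then 1 */
	return 0;
}

The real code of course also has to deal with locking and with not re-adding an isolated zspage mid-reclaim, which the sketch deliberately ignores to keep the aging policy itself visible.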