Message ID: 20240614100902.3469724-3-usamaarif642@gmail.com
State: New
Series: mm: store zero pages to be swapped out in a bitmap
On 2024/6/14 18:07, Usama Arif wrote:
> With an earlier commit to handle zero-filled pages in swap directly,
> and with only 1% of the same-filled pages being non-zero, zswap no
> longer needs to handle same-filled pages and can just work on compressed
> pages.
>
> Signed-off-by: Usama Arif <usamaarif642@gmail.com>

Looks good to me, thanks.

Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>

> ---
>  mm/zswap.c | 86 +++++-------------------------------------------------
>  1 file changed, 8 insertions(+), 78 deletions(-)
>
> [... quoted diff snipped; identical to the full patch below ...]
On Fri, Jun 14, 2024 at 3:09 AM Usama Arif <usamaarif642@gmail.com> wrote:
>
> With an earlier commit to handle zero-filled pages in swap directly,
> and with only 1% of the same-filled pages being non-zero, zswap no
> longer needs to handle same-filled pages and can just work on compressed
> pages.
>
> Signed-off-by: Usama Arif <usamaarif642@gmail.com>

Acked-by: Yosry Ahmed <yosryahmed@google.com>
On Fri, Jun 14, 2024 at 3:09 AM Usama Arif <usamaarif642@gmail.com> wrote:
>
> With an earlier commit to handle zero-filled pages in swap directly,
> and with only 1% of the same-filled pages being non-zero, zswap no
> longer needs to handle same-filled pages and can just work on compressed
> pages.
>
> Signed-off-by: Usama Arif <usamaarif642@gmail.com>
> ---

Other than my previous comments to re-add the stat counter somehow (which
will probably help with testing in the future), this series LGTM!

Reviewed-by: Nhat Pham <nphamcs@gmail.com>
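[Editor's note: for reference, a minimal sketch of what re-adding such a
counter could look like, reusing the debugfs helper this patch removes.
The counter name and the increment site are illustrative assumptions, not
code from this series.]

/* Illustrative only: count zero-filled pages handled by the swap layer. */
static atomic_t zswap_zero_filled_pages = ATOMIC_INIT(0);

	/* bumped wherever a zero-filled page is recorded at swap-out ... */
	atomic_inc(&zswap_zero_filled_pages);

	/* ... and exposed next to the existing stored_pages entry: */
	debugfs_create_atomic_t("zero_filled_pages", 0444,
				zswap_debugfs_root, &zswap_zero_filled_pages);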
diff --git a/mm/zswap.c b/mm/zswap.c
index a546c01602aa..e25a6808c2ed 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -44,8 +44,6 @@
 **********************************/
 /* The number of compressed pages currently stored in zswap */
 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 
 /*
  * The statistics below are not protected from concurrent access for
@@ -188,11 +186,9 @@ static struct shrinker *zswap_shrinker;
  *
  * swpentry - associated swap entry, the offset indexes into the red-black tree
  * length - the length in bytes of the compressed page data. Needed during
- *          decompression. For a same value filled page length is 0, and both
- *          pool and lru are invalid and must be ignored.
+ *          decompression.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
- * value - value of the same-value filled pages which have same content
  * objcg - the obj_cgroup that the compressed memory is charged to
  * lru - handle to the pool's lru used to evict pages.
  */
@@ -200,10 +196,7 @@ struct zswap_entry {
 	swp_entry_t swpentry;
 	unsigned int length;
 	struct zswap_pool *pool;
-	union {
-		unsigned long handle;
-		unsigned long value;
-	};
+	unsigned long handle;
 	struct obj_cgroup *objcg;
 	struct list_head lru;
 };
@@ -820,13 +813,9 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
  */
 static void zswap_entry_free(struct zswap_entry *entry)
 {
-	if (!entry->length)
-		atomic_dec(&zswap_same_filled_pages);
-	else {
-		zswap_lru_del(&zswap_list_lru, entry);
-		zpool_free(zswap_find_zpool(entry), entry->handle);
-		zswap_pool_put(entry->pool);
-	}
+	zswap_lru_del(&zswap_list_lru, entry);
+	zpool_free(zswap_find_zpool(entry), entry->handle);
+	zswap_pool_put(entry->pool);
 	if (entry->objcg) {
 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 		obj_cgroup_put(entry->objcg);
@@ -1268,11 +1257,6 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	 * This ensures that the better zswap compresses memory, the fewer
 	 * pages we will evict to swap (as it will otherwise incur IO for
 	 * relatively small memory saving).
-	 *
-	 * The memory saving factor calculated here takes same-filled pages into
-	 * account, but those are not freeable since they almost occupy no
-	 * space. Hence, we may scale nr_freeable down a little bit more than we
-	 * should if we have a lot of same-filled pages.
 	 */
 	return mult_frac(nr_freeable, nr_backing, nr_stored);
 }
@@ -1376,42 +1360,6 @@ static void shrink_worker(struct work_struct *w)
 	} while (zswap_total_pages() > thr);
 }
 
-/*********************************
-* same-filled functions
-**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
-{
-	unsigned long *data;
-	unsigned long val;
-	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
-	bool ret = false;
-
-	data = kmap_local_folio(folio, 0);
-	val = data[0];
-
-	if (val != data[last_pos])
-		goto out;
-
-	for (pos = 1; pos < last_pos; pos++) {
-		if (val != data[pos])
-			goto out;
-	}
-
-	*value = val;
-	ret = true;
-out:
-	kunmap_local(data);
-	return ret;
-}
-
-static void zswap_fill_folio(struct folio *folio, unsigned long value)
-{
-	unsigned long *data = kmap_local_folio(folio, 0);
-
-	memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
-	kunmap_local(data);
-}
-
 /*********************************
 * main API
 **********************************/
@@ -1423,7 +1371,6 @@ bool zswap_store(struct folio *folio)
 	struct zswap_entry *entry, *old;
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg = NULL;
-	unsigned long value;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1456,13 +1403,6 @@ bool zswap_store(struct folio *folio)
 		goto reject;
 	}
 
-	if (zswap_is_folio_same_filled(folio, &value)) {
-		entry->length = 0;
-		entry->value = value;
-		atomic_inc(&zswap_same_filled_pages);
-		goto store_entry;
-	}
-
 	/* if entry is successfully added, it keeps the reference */
 	entry->pool = zswap_pool_current_get();
 	if (!entry->pool)
@@ -1480,7 +1420,6 @@ bool zswap_store(struct folio *folio)
 	if (!zswap_compress(folio, entry))
 		goto put_pool;
 
-store_entry:
 	entry->swpentry = swp;
 	entry->objcg = objcg;
 
@@ -1528,13 +1467,9 @@ bool zswap_store(struct folio *folio)
 	return true;
 
 store_failed:
-	if (!entry->length)
-		atomic_dec(&zswap_same_filled_pages);
-	else {
-		zpool_free(zswap_find_zpool(entry), entry->handle);
+	zpool_free(zswap_find_zpool(entry), entry->handle);
 put_pool:
-		zswap_pool_put(entry->pool);
-	}
+	zswap_pool_put(entry->pool);
 freepage:
 	zswap_entry_cache_free(entry);
 reject:
@@ -1597,10 +1532,7 @@ bool zswap_load(struct folio *folio)
 	if (!entry)
 		return false;
 
-	if (entry->length)
-		zswap_decompress(entry, folio);
-	else
-		zswap_fill_folio(folio, entry->value);
+	zswap_decompress(entry, folio);
 
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
@@ -1703,8 +1635,6 @@ static int zswap_debugfs_init(void)
 			    zswap_debugfs_root, NULL, &total_size_fops);
 	debugfs_create_atomic_t("stored_pages", 0444,
 				zswap_debugfs_root, &zswap_stored_pages);
-	debugfs_create_atomic_t("same_filled_pages", 0444,
-				zswap_debugfs_root, &zswap_same_filled_pages);
 
 	return 0;
 }
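[Editor's note: a quick worked example of the zswap_shrinker_count() hunk
above, with made-up numbers. mult_frac(nr_freeable, nr_backing, nr_stored)
computes nr_freeable * nr_backing / nr_stored while avoiding intermediate
overflow, so with nr_freeable = 400 LRU objects, nr_backing = 250 pool
pages and nr_stored = 1000 stored pages (a 4:1 compression ratio), the
shrinker reports 400 * 250 / 1000 = 100 freeable objects. With the
same-filled case gone, every stored page occupies real pool space, which
removes the skew the deleted comment warned about.]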
With an earlier commit to handle zero-filled pages in swap directly,
and with only 1% of the same-filled pages being non-zero, zswap no
longer needs to handle same-filled pages and can just work on compressed
pages.

Signed-off-by: Usama Arif <usamaarif642@gmail.com>
---
 mm/zswap.c | 86 +++++-------------------------------------------------
 1 file changed, 8 insertions(+), 78 deletions(-)
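[Editor's note: the "earlier commit" referenced above is the part of this
series that detects zero-filled pages at swap-out time, so zswap never sees
them. As a hypothetical sketch of that kind of check (not the series' exact
implementation), assuming kmap_local_folio() and memchr_inv():]

/* Hypothetical sketch: is this single-page folio entirely zeroes? */
static bool folio_is_zero_filled(struct folio *folio)
{
	unsigned long *data = kmap_local_folio(folio, 0);
	bool zero = !memchr_inv(data, 0, PAGE_SIZE);

	kunmap_local(data);
	return zero;
}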