Message ID | 20191009211857.35587-1-minchan@kernel.org (mailing list archive)
---|---
State | New, archived
Series | fs: annotate refault stalls from bdev_read_page
On Wed, Oct 9, 2019 at 2:19 PM Minchan Kim <minchan@kernel.org> wrote:
>
> From: Minchan Kim <minchan@google.com>
>
> If a block device supports the rw_page operation, it doesn't submit a bio,
> so the annotation in submit_bio for refault stalls doesn't work. This
> happens with zram on Android, especially on the swap read path, which can
> consume CPU cycles for decompression.

What about zswap? Do we need the same in zswap_frontswap_load()?

>
> Annotate bdev_read_page() to account the synchronous IO overhead
> to prevent underreporting memory pressure.
>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Signed-off-by: Minchan Kim <minchan@google.com>
> ---
>  fs/block_dev.c | 13 +++++++++++++
>  mm/memory.c    |  1 +
>  2 files changed, 14 insertions(+)
[...]
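For context on the accounting the commit message says gets bypassed: submit_bio() already wraps bio submission in psi_memstall_enter()/psi_memstall_leave() for workingset reads, but drivers that implement ->rw_page (zram, brd, pmem) are reached through bdev_read_page() without a bio ever being built, so that accounting never runs. Below is a simplified sketch of the shape of that existing check, assuming the 5.4-era submit_bio()/BIO_WORKINGSET scheme; it is not verbatim kernel code.

```c
/*
 * Simplified sketch of the existing stall accounting in block/blk-core.c
 * that rw_page-based reads bypass; not verbatim kernel code.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	unsigned long pflags;
	bool workingset_read = false;
	blk_qc_t ret;

	/* Workingset pages mark the bio when they are added to it. */
	if (bio_op(bio) == REQ_OP_READ && bio_flagged(bio, BIO_WORKINGSET))
		workingset_read = true;

	if (workingset_read)
		psi_memstall_enter(&pflags);

	ret = generic_make_request(bio);

	if (workingset_read)
		psi_memstall_leave(&pflags);

	return ret;
}
```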
On Wed, Oct 09, 2019 at 03:25:10PM -0700, Shakeel Butt wrote:
> On Wed, Oct 9, 2019 at 2:19 PM Minchan Kim <minchan@kernel.org> wrote:
> >
> > From: Minchan Kim <minchan@google.com>
> >
> > If a block device supports the rw_page operation, it doesn't submit a bio,
> > so the annotation in submit_bio for refault stalls doesn't work. This
> > happens with zram on Android, especially on the swap read path, which can
> > consume CPU cycles for decompression.
>
> What about zswap? Do we need the same in zswap_frontswap_load()?

Yup, it needs it. Maybe an annotation in swap_readpage() would cover both
at once in practice, unless we need to take care of nvdimms, which support
the rw_page operation.

Thanks.

[...]
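A minimal sketch of what the swap_readpage()-level annotation suggested here could look like. This is illustrative only, not a patch posted in this thread, and __do_swap_readpage() is a hypothetical stand-in for the function's existing body (the frontswap/zswap load, rw_page backends such as zram, and the regular bio path).

```c
/*
 * Illustrative sketch only: annotating swap_readpage() charges the whole
 * synchronous read as a memory stall whenever the faulting page belongs
 * to the workingset, covering zswap decompression and rw_page backends
 * as well as bio submission.
 */
int swap_readpage(struct page *page, bool synchronous)
{
	unsigned long pflags;
	bool workingset = PageWorkingset(page);
	int ret;

	if (workingset)
		psi_memstall_enter(&pflags);

	/* Hypothetical helper standing in for the existing function body. */
	ret = __do_swap_readpage(page, synchronous);

	if (workingset)
		psi_memstall_leave(&pflags);

	return ret;
}
```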
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9c073dbdc1b0..82ca28eb9a57 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -26,6 +26,7 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/mount.h>
+#include <linux/psi.h>
 #include <linux/pseudo_fs.h>
 #include <linux/uio.h>
 #include <linux/namei.h>
@@ -701,6 +702,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 {
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
 	int result = -EOPNOTSUPP;
+	unsigned long pflags;
+	bool workingset_read;
 
 	if (!ops->rw_page || bdev_get_integrity(bdev))
 		return result;
@@ -708,9 +711,19 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 	result = blk_queue_enter(bdev->bd_queue, 0);
 	if (result)
 		return result;
+
+	workingset_read = PageWorkingset(page);
+	if (workingset_read)
+		psi_memstall_enter(&pflags);
+
 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
 			      REQ_OP_READ);
+
+	if (workingset_read)
+		psi_memstall_leave(&pflags);
+
 	blk_queue_exit(bdev->bd_queue);
+
 	return result;
 }
 EXPORT_SYMBOL_GPL(bdev_read_page);
diff --git a/mm/memory.c b/mm/memory.c
index 06935826d71e..6357d5a0a2a5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2801,6 +2801,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (page) {
 		__SetPageLocked(page);
 		__SetPageSwapBacked(page);
+		SetPageWorkingset(page);
 		set_page_private(page, entry.val);
 		lru_cache_add_anon(page);
 		swap_readpage(page, true);
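Shakeel's question above concerns the zswap path: its decompression is also synchronous CPU work on the faulting task, but it is reached through frontswap rather than bdev_read_page(), so this patch would not account for it. If the annotation stayed at the backend level instead of being hoisted into swap_readpage(), zswap would need a similar wrapper. A minimal sketch under that assumption follows; the function name matches mm/zswap.c of this era, while __zswap_load_decompress() is a hypothetical stand-in for the existing entry lookup and decompression, and this is not a posted patch.

```c
/*
 * Hypothetical sketch: the same PSI stall accounting applied to the zswap
 * load path, which decompresses synchronously on the faulting CPU just
 * like zram.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	unsigned long pflags;
	bool workingset = PageWorkingset(page);
	int ret;

	if (workingset)
		psi_memstall_enter(&pflags);

	/* Hypothetical helper standing in for the existing function body. */
	ret = __zswap_load_decompress(type, offset, page);

	if (workingset)
		psi_memstall_leave(&pflags);

	return ret;
}
```

As Minchan notes in his reply, hoisting the annotation into swap_readpage() would cover both this path and the rw_page backends in one place, which is why that alternative is floated in the thread.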