Message ID | 20200827092417.7973-1-cmaiolino@redhat.com (mailing list archive) |
---|---|
State | Accepted, archived |
Headers | show |
Series | [V3] xfs: Remove kmem_zalloc_large() | expand |
On Thu, Aug 27, 2020 at 11:24:17AM +0200, Carlos Maiolino wrote:
> This patch aims to replace kmem_zalloc_large() with global kernel memory
> API. So, all its callers are now using kvzalloc() directly, so kmalloc()
> fallsback to vmalloc() automatically.
>
> Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
> Reviewed-by: Dave Chinner <dchinner@redhat.com>

Looks good to me,
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

--D

> ---
>
> Changelog:
>
> V2:
> 	Remove __GFP_RETRY_MAYFAIL from the kvzalloc() calls
> V3:
> 	Remove extra newline
>
>  fs/xfs/kmem.h          | 6 ------
>  fs/xfs/scrub/symlink.c | 2 +-
>  fs/xfs/xfs_acl.c       | 2 +-
>  fs/xfs/xfs_ioctl.c     | 4 ++--
>  fs/xfs/xfs_rtalloc.c   | 2 +-
>  5 files changed, 5 insertions(+), 11 deletions(-)
>
> diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
> index fb1d066770723..38007117697ef 100644
> --- a/fs/xfs/kmem.h
> +++ b/fs/xfs/kmem.h
> @@ -71,12 +71,6 @@ kmem_zalloc(size_t size, xfs_km_flags_t flags)
>  	return kmem_alloc(size, flags | KM_ZERO);
>  }
>
> -static inline void *
> -kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
> -{
> -	return kmem_alloc_large(size, flags | KM_ZERO);
> -}
> -
>  /*
>   * Zone interfaces
>   */
> diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
> index 5641ae512c9ef..c08be5ede0661 100644
> --- a/fs/xfs/scrub/symlink.c
> +++ b/fs/xfs/scrub/symlink.c
> @@ -22,7 +22,7 @@ xchk_setup_symlink(
>  	struct xfs_inode	*ip)
>  {
>  	/* Allocate the buffer without the inode lock held. */
> -	sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0);
> +	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL);
>  	if (!sc->buf)
>  		return -ENOMEM;
>
> diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
> index d4c687b5cd067..c544951a0c07f 100644
> --- a/fs/xfs/xfs_acl.c
> +++ b/fs/xfs/xfs_acl.c
> @@ -192,7 +192,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
>
>  	if (acl) {
>  		args.valuelen = XFS_ACL_SIZE(acl->a_count);
> -		args.value = kmem_zalloc_large(args.valuelen, 0);
> +		args.value = kvzalloc(args.valuelen, GFP_KERNEL);
>  		if (!args.value)
>  			return -ENOMEM;
>  		xfs_acl_to_disk(args.value, acl);
> diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
> index 6f22a66777cd0..4428b893f8372 100644
> --- a/fs/xfs/xfs_ioctl.c
> +++ b/fs/xfs/xfs_ioctl.c
> @@ -404,7 +404,7 @@ xfs_ioc_attr_list(
>  	    context.cursor.offset))
>  		return -EINVAL;
>
> -	buffer = kmem_zalloc_large(bufsize, 0);
> +	buffer = kvzalloc(bufsize, GFP_KERNEL);
>  	if (!buffer)
>  		return -ENOMEM;
>
> @@ -1690,7 +1690,7 @@ xfs_ioc_getbmap(
>  	if (bmx.bmv_count > ULONG_MAX / recsize)
>  		return -ENOMEM;
>
> -	buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
> +	buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL);
>  	if (!buf)
>  		return -ENOMEM;
>
> diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
> index 6209e7b6b895b..0558f92ecdb83 100644
> --- a/fs/xfs/xfs_rtalloc.c
> +++ b/fs/xfs/xfs_rtalloc.c
> @@ -862,7 +862,7 @@ xfs_alloc_rsum_cache(
>  	 * lower bound on the minimum level with any free extents. We can
>  	 * continue without the cache if it couldn't be allocated.
>  	 */
> -	mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0);
> +	mp->m_rsum_cache = kvzalloc(rbmblocks, GFP_KERNEL);
>  	if (!mp->m_rsum_cache)
>  		xfs_warn(mp, "could not allocate realtime summary cache");
>  }
> --
> 2.26.2
>
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index fb1d066770723..38007117697ef 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -71,12 +71,6 @@ kmem_zalloc(size_t size, xfs_km_flags_t flags)
 	return kmem_alloc(size, flags | KM_ZERO);
 }
 
-static inline void *
-kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
-{
-	return kmem_alloc_large(size, flags | KM_ZERO);
-}
-
 /*
  * Zone interfaces
  */
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 5641ae512c9ef..c08be5ede0661 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -22,7 +22,7 @@ xchk_setup_symlink(
 	struct xfs_inode	*ip)
 {
 	/* Allocate the buffer without the inode lock held. */
-	sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0);
+	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL);
 	if (!sc->buf)
 		return -ENOMEM;
 
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index d4c687b5cd067..c544951a0c07f 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -192,7 +192,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 
 	if (acl) {
 		args.valuelen = XFS_ACL_SIZE(acl->a_count);
-		args.value = kmem_zalloc_large(args.valuelen, 0);
+		args.value = kvzalloc(args.valuelen, GFP_KERNEL);
 		if (!args.value)
 			return -ENOMEM;
 		xfs_acl_to_disk(args.value, acl);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6f22a66777cd0..4428b893f8372 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -404,7 +404,7 @@ xfs_ioc_attr_list(
 	    context.cursor.offset))
 		return -EINVAL;
 
-	buffer = kmem_zalloc_large(bufsize, 0);
+	buffer = kvzalloc(bufsize, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -1690,7 +1690,7 @@ xfs_ioc_getbmap(
 	if (bmx.bmv_count > ULONG_MAX / recsize)
 		return -ENOMEM;
 
-	buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
+	buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 6209e7b6b895b..0558f92ecdb83 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -862,7 +862,7 @@ xfs_alloc_rsum_cache(
 	 * lower bound on the minimum level with any free extents. We can
 	 * continue without the cache if it couldn't be allocated.
 	 */
-	mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0);
+	mp->m_rsum_cache = kvzalloc(rbmblocks, GFP_KERNEL);
 	if (!mp->m_rsum_cache)
 		xfs_warn(mp, "could not allocate realtime summary cache");
 }