--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -119,12 +119,12 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}
 
-	page = alloc_page(RPCRDMA_DEF_GFP);
+	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return -ENOMEM;
 	rqst->rq_buffer = page_address(page);
 
-	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_KERNEL);
 	if (!rqst->rq_rbuffer) {
 		put_page(page);
 		return -ENOMEM;
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -541,11 +541,10 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
 }
 
 static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
-				 struct rpcrdma_regbuf *rb, size_t size,
-				 gfp_t flags)
+				 struct rpcrdma_regbuf *rb, size_t size)
 {
 	if (unlikely(rdmab_length(rb) < size)) {
-		if (!rpcrdma_regbuf_realloc(rb, size, flags))
+		if (!rpcrdma_regbuf_realloc(rb, size))
			return false;
 		r_xprt->rx_stats.hardway_register_count += size;
 	}
@@ -567,17 +566,10 @@ xprt_rdma_allocate(struct rpc_task *task)
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	gfp_t flags;
 
-	flags = RPCRDMA_DEF_GFP;
-	if (RPC_IS_SWAPPER(task))
-		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
-
-	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
-				  flags))
+	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize))
 		goto out_fail;
-	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
-				  flags))
+	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize))
 		goto out_fail;
 
 	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1259,16 +1259,15 @@ rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
 /**
  * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
  * @rb: regbuf to reallocate
  * @size: size of buffer to be allocated, in bytes
- * @flags: GFP flags
  *
  * Returns true if reallocation was successful. If false is
  * returned, @rb is left untouched.
  */
-bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
+bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size)
 {
 	void *buf;
 
-	buf = kmalloc(size, flags);
+	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
 		return false;
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -149,8 +149,6 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
 	return rb->rg_data;
 }
 
-#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
-
 /* To ensure a transport can always make forward progress,
  * the number of RDMA segments allowed in header chunk lists
  * is capped at 16. This prevents less-capable devices from
@@ -484,8 +482,7 @@ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
 			struct rpcrdma_req *req);
 void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
 void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
 
-bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
-			    gfp_t flags);
+bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size);
 bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
 			      struct rpcrdma_regbuf *rb);
When allocating memory, it should be safe to always use GFP_KERNEL,
since both swap tasks and asynchronous tasks will regulate the
allocation mode through the struct task flags. A similar change was
recently made for RPC socket transports.

Since the @flags argument to rpcrdma_regbuf_realloc() is now invariant,
remove it.

Suggested-by: Trond Myklebust <trondmy@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/svc_rdma_backchannel.c |  4 ++--
 net/sunrpc/xprtrdma/transport.c            | 16 ++++------------
 net/sunrpc/xprtrdma/verbs.c                |  5 ++---
 net/sunrpc/xprtrdma/xprt_rdma.h            |  5 +----
 4 files changed, 9 insertions(+), 21 deletions(-)
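[Reviewer note, not for the changelog: the "struct task flags"
regulation mentioned above is the memalloc scope API from
<linux/sched/mm.h>. rpciod/xprtiod work items already run under
memalloc_nofs_save(), and swap tasks run with PF_MEMALLOC set (see
memalloc_noreclaim_save()), so the caller's context, rather than a
gfp_t threaded through every call site, decides how deep an allocation
may reclaim. A minimal sketch of the idea, using a made-up helper name:

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	/* Illustration only: within a memalloc_noio scope, the page
	 * allocator masks __GFP_IO/__GFP_FS out of GFP_KERNEL, so this
	 * plain GFP_KERNEL request behaves like GFP_NOIO.
	 */
	static void *noio_scope_alloc_example(size_t size)
	{
		unsigned int pflags = memalloc_noio_save();
		void *buf = kmalloc(size, GFP_KERNEL);

		memalloc_noio_restore(pflags);
		return buf;
	}

This is why the @flags arguments can simply go away.]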