author | Trond Myklebust <trond.myklebust@hammerspace.com> | 2022-07-27 12:27:54 -0400
---|---|---
committer | Trond Myklebust <trond.myklebust@hammerspace.com> | 2022-07-27 12:45:17 -0400
commit | 6622e3a73112fc336c1c2c582428fb5ef18e456a (patch) |
tree | 40b81e3638715fe090813c54bd38dd2af040338c /net/sunrpc/backchannel_rqst.c |
parent | e35a5e782f67ed76a65ad0f23a484444a95f000f (diff) |
SUNRPC: Reinitialise the backchannel request buffers before reuse
When we're reusing the backchannel requests instead of freeing them, we
should reinitialise the send/receive xdr_bufs so that they reflect the
available space.
Fixes: 0d2a970d0ae5 ("SUNRPC: Fix a backchannel race")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Diffstat (limited to 'net/sunrpc/backchannel_rqst.c')
-rw-r--r-- | net/sunrpc/backchannel_rqst.c | 14 |
1 file changed, 14 insertions, 0 deletions
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 5a6b61dcdf2d..ad8ef1fb08b4 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,6 +64,17 @@ static void xprt_free_allocation(struct rpc_rqst *req)
 	kfree(req);
 }
 
+static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
+{
+	buf->head[0].iov_len = PAGE_SIZE;
+	buf->tail[0].iov_len = 0;
+	buf->pages = NULL;
+	buf->page_len = 0;
+	buf->flags = 0;
+	buf->len = 0;
+	buf->buflen = PAGE_SIZE;
+}
+
 static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
 {
 	struct page *page;
@@ -292,6 +303,9 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
 	 */
 	spin_lock_bh(&xprt->bc_pa_lock);
 	if (xprt_need_to_requeue(xprt)) {
+		xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
+		xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
+		req->rq_rcv_buf.len = PAGE_SIZE;
 		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
 		xprt->bc_alloc_count++;
 		atomic_inc(&xprt->bc_slot_count);
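To make the intent of the reinitialisation concrete, here is a minimal userspace sketch of the reuse problem the commit message describes. This is not kernel code: the struct `demo_buf`, the helper `demo_reinit()` and `DEMO_PAGE_SIZE` are hypothetical stand-ins, not part of the SUNRPC API. After one use, a buffer descriptor only advertises the bytes that were actually consumed, so requeueing it without a reset would leave the next caller with a shrunken buffer.

```c
/* Illustrative userspace sketch only; demo_buf, demo_reinit and
 * DEMO_PAGE_SIZE are hypothetical names, not kernel API. */
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

struct demo_buf {
	char data[DEMO_PAGE_SIZE];
	size_t len;    /* bytes currently used */
	size_t buflen; /* bytes available to the next user */
};

/* Analogous to what the patch does: make the descriptor advertise
 * the whole page as available space again before it is requeued. */
static void demo_reinit(struct demo_buf *buf)
{
	buf->len = 0;
	buf->buflen = DEMO_PAGE_SIZE;
}

int main(void)
{
	struct demo_buf buf = { .len = 0, .buflen = DEMO_PAGE_SIZE };
	const char msg[] = "backchannel call";

	/* Simulate one use of the request: the consumer trims the
	 * descriptor down to what was actually transferred. */
	memcpy(buf.data, msg, sizeof(msg));
	buf.len = sizeof(msg);
	buf.buflen = sizeof(msg);

	/* Without reinitialisation the next user of this buffer would
	 * see only sizeof(msg) bytes of space instead of a full page. */
	printf("before reinit: len=%zu buflen=%zu\n", buf.len, buf.buflen);

	demo_reinit(&buf);
	printf("after  reinit: len=%zu buflen=%zu\n", buf.len, buf.buflen);
	return 0;
}
```

As the diff above shows, the actual patch applies the same idea to both `rq_snd_buf` and `rq_rcv_buf` via `xprt_bc_reinit_xdr_buf()`, and additionally sets `req->rq_rcv_buf.len` back to `PAGE_SIZE` before the request is returned to the `bc_pa_list` for reuse.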