| author | Trond Myklebust <trond.myklebust@hammerspace.com> | 2019-03-02 10:14:02 -0500 |
|---|---|---|
| committer | Trond Myklebust <trond.myklebust@hammerspace.com> | 2019-03-02 16:25:26 -0500 |
| commit | 12a3ad6184f86ba48f2269198c1a4520085f3002 (patch) | |
| tree | 68fd5f22bc1e1ba59cfd3eebf7a70a9dd40d17de /net/sunrpc/sched.c | |
| parent | cefa587a40bb5333901486632d4062f40a146585 (diff) | |
SUNRPC: Convert remaining GFP_NOIO, and GFP_NOWAIT sites in sunrpc
Convert the remaining gfp_flags arguments in sunrpc to standard reclaiming
allocations, now that we set memalloc_nofs_save() as appropriate.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
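The conversion works because the NFS/SUNRPC entry points now bracket their allocation paths with memalloc_nofs_save()/memalloc_nofs_restore(): inside such a scope, any GFP_KERNEL (or GFP_NOFS) allocation is implicitly prevented from recursing into filesystem reclaim, so callees no longer need to pass GFP_NOIO themselves. The sketch below is not part of this patch; the function name and variable names are hypothetical illustrations of the scoped-reclaim pattern the commit message refers to.

```c
/*
 * Minimal sketch (not SUNRPC code) of the memalloc_nofs_save() scoping
 * pattern. Once the caller enters an NOFS scope, allocations made anywhere
 * below it behave as if GFP_NOFS had been passed, so the callee can simply
 * use GFP_KERNEL/GFP_NOFS without risking recursion into FS writeback.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_alloc_in_writeback_path(size_t size)
{
	unsigned int nofs_flags;
	void *buf;

	/* Enter an NOFS scope: filesystem reclaim is forbidden from here on. */
	nofs_flags = memalloc_nofs_save();

	/* GFP_KERNEL here is effectively GFP_NOFS because of the scope above. */
	buf = kmalloc(size, GFP_KERNEL);

	/* Leave the scope, restoring the caller's previous reclaim state. */
	memalloc_nofs_restore(nofs_flags);

	return buf;
}
```

Note that the swapper path in rpc_malloc() is deliberately left alone: tasks issuing swap-over-NFS I/O still use __GFP_MEMALLOC | GFP_NOWAIT so they can dip into the emergency reserves without ever sleeping for reclaim.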
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r-- | net/sunrpc/sched.c | 7 |
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 2168d4d9c09f..f21557213a43 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -925,16 +925,13 @@ static void rpc_async_schedule(struct work_struct *work)
  * Most requests are 'small' (under 2KiB) and can be serviced from a
  * mempool, ensuring that NFS reads and writes can always proceed,
  * and that there is good locality of reference for these buffers.
- *
- * In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we avoid using GFP_KERNEL.
  */
 int rpc_malloc(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
-	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
+	gfp_t gfp = GFP_NOFS;
 
 	if (RPC_IS_SWAPPER(task))
 		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
@@ -1015,7 +1012,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 static struct rpc_task *
 rpc_alloc_task(void)
 {
-	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
+	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
 /*