author     Mikhail Pershin <mike.pershin@intel.com>          2016-09-18 16:38:53 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2016-09-19 10:08:22 +0200
commit     be2479818ff227fbb6800935909b65e06159735c (patch)
tree       5b55e56def08b826022168916e2c0a5bc126cf80
parent     bbc222b113fb5bed066a9bcc4ce7c1a92528a47e (diff)
staging: lustre: ptlrpc: prevent request timeout grow due to recovery
This patch fixes an issue seen on the client, where the request
timeout kept growing after the server-side patch for LU-5079
landed. While that commit itself is correct, it revealed another
issue: if a request takes a long time to process on the server,
the client's adaptive timeouts adapt to that after receiving the
reply, and new requests get a bigger timeout. Another problem is
that the server's AT history is corrupted by recovery-request
processing times, which are not pure service time but also include
the time spent waiting for clients to recover.
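For context, the adaptive timeout (AT) history mentioned above is
essentially a sliding-window maximum of observed service times. The
sketch below is a simplified standalone model of that idea; the struct
name, bin count and bin width are illustrative assumptions, not the
actual struct adaptive_timeout behind at_measured()/at_get() in the
diff further down.

#include <stdio.h>
#include <string.h>

#define AT_BINS 4
#define AT_BIN_SECS 150 /* assumed width of one history bin */

struct at_sketch {
	unsigned int hist[AT_BINS]; /* per-bin maxima, hist[0] newest */
	long binstart;              /* start time of hist[0] */
	unsigned int current;       /* cached max over all bins */
};

static void at_sketch_measure(struct at_sketch *at, long now,
			      unsigned int val)
{
	unsigned int max = 0;
	int shift, i;

	/* Slide the window forward, dropping bins that aged out. */
	shift = (int)((now - at->binstart) / AT_BIN_SECS);
	if (shift >= AT_BINS) {
		memset(at->hist, 0, sizeof(at->hist));
		at->binstart = now;
	} else if (shift > 0) {
		memmove(&at->hist[shift], &at->hist[0],
			(AT_BINS - shift) * sizeof(at->hist[0]));
		memset(&at->hist[0], 0, shift * sizeof(at->hist[0]));
		at->binstart += (long)shift * AT_BIN_SECS;
	}

	if (val > at->hist[0])
		at->hist[0] = val; /* record in the newest bin */

	/* Estimate is the maximum over the whole window. */
	for (i = 0; i < AT_BINS; i++)
		if (at->hist[i] > max)
			max = at->hist[i];
	at->current = max;
}

int main(void)
{
	struct at_sketch at = { .binstart = 0 };

	at_sketch_measure(&at, 10, 25);  /* normal service time */
	at_sketch_measure(&at, 20, 300); /* one recovery-inflated sample */
	at_sketch_measure(&at, 30, 25);  /* back to normal... */
	printf("estimate now: %u\n", at.current); /* still 300 */
	return 0;
}

Because the estimate is a maximum over the window, a single
recovery-inflated sample (the 300 above) keeps the reported timeout
high until it ages out, which is why the patch keeps recovery
processing times out of the history.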
This patch prevents the AT stats from being updated from early replies
on the client and from recovery-request processing times on the server.
ptlrpc_at_recv_early_reply() still updates the current request's
timeout as asked by the server, but no longer feeds it into the AT
stats; the real reply will bring that data from the server in any case.
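To make the server-side arithmetic in the service.c hunk below
concrete, here is a small standalone example of the newdl computation
it introduces. All values are invented for illustration:

#include <stdio.h>

int main(void)
{
	long long arrival  = 1000; /* req->rq_arrival_time.tv_sec */
	long long now      = 1030; /* ktime_get_real_seconds() */
	long long estimate =   40; /* at_get(&svcpt->scp_at_estimate) */
	long long deadline = 1045; /* req->rq_deadline */

	/* Candidate new deadline, computed once and reused for the
	 * check, the warning message and the final assignment.
	 */
	long long newdl = arrival + estimate; /* 1040 */

	if (deadline >= newdl) /* 1045 >= 1040: cannot extend */
		printf("couldn't add any time (%lld/%lld)\n",
		       deadline - now, newdl - now);
	else
		deadline = newdl; /* early reply extends the deadline */
	return 0;
}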
Signed-off-by: Mikhail Pershin <mike.pershin@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6084
Reviewed-on: http://review.whamcloud.com/13520
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Jian Yu <jian.yu@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/client.c  | 30
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/service.c | 12
2 files changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 18c7ff7b89a6..8c51d51a678b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -364,23 +364,27 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 	}
 
 	rc = unpack_reply(early_req);
-	if (rc == 0) {
-		/* Expecting to increase the service time estimate here */
-		ptlrpc_at_adj_service(req,
-				      lustre_msg_get_timeout(early_req->rq_repmsg));
-		ptlrpc_at_adj_net_latency(req,
-					  lustre_msg_get_service_time(early_req->rq_repmsg));
-	}
-
-	sptlrpc_cli_finish_early_reply(early_req);
-
-	if (rc != 0) {
+	if (rc) {
+		sptlrpc_cli_finish_early_reply(early_req);
 		spin_lock(&req->rq_lock);
 		return rc;
 	}
 
-	/* Adjust the local timeout for this req */
-	ptlrpc_at_set_req_timeout(req);
+	/*
+	 * Use new timeout value just to adjust the local value for this
+	 * request, don't include it into at_history. It is unclear yet why
+	 * service time increased and should it be counted or skipped, e.g.
+	 * that can be recovery case or some error or server, the real reply
+	 * will add all new data if it is worth to add.
+	 */
+	req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
+	lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+
+	/* Network latency can be adjusted, it is pure network delays */
+	ptlrpc_at_adj_net_latency(req,
+				  lustre_msg_get_service_time(early_req->rq_repmsg));
+
+	sptlrpc_cli_finish_early_reply(early_req);
 
 	spin_lock(&req->rq_lock);
 	olddl = req->rq_deadline;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 3edc9b18fff0..72f39308eebb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1015,6 +1015,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
 	struct ptlrpc_request *reqcopy;
 	struct lustre_msg *reqmsg;
 	long olddl = req->rq_deadline - ktime_get_real_seconds();
+	time64_t newdl;
 	int rc;
 
 	/* deadline is when the client expects us to reply, margin is the
@@ -1052,16 +1053,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
 	 */
 	at_measured(&svcpt->scp_at_estimate, at_extra +
 		    ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
+	newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate);
 
 	/* Check to see if we've actually increased the deadline -
 	 * we may be past adaptive_max
 	 */
-	if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
-	    at_get(&svcpt->scp_at_estimate)) {
+	if (req->rq_deadline >= newdl) {
 		DEBUG_REQ(D_WARNING, req,
 			  "Couldn't add any time (%ld/%lld), not sending early reply\n",
-			  olddl, req->rq_arrival_time.tv_sec +
-			  at_get(&svcpt->scp_at_estimate) -
-			  ktime_get_real_seconds());
+			  olddl, newdl - ktime_get_real_seconds());
 		return -ETIMEDOUT;
 	}
 
@@ -1117,8 +1116,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
 
 	if (!rc) {
 		/* Adjust our own deadline to what we told the client */
-		req->rq_deadline = req->rq_arrival_time.tv_sec +
-				   at_get(&svcpt->scp_at_estimate);
+		req->rq_deadline = newdl;
 		req->rq_early_count++; /* number sent, server side */
 	} else {
 		DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);