author:    Chuck Lever <chuck.lever@oracle.com>  2024-07-29 16:52:32 -0400
committer: Chuck Lever <chuck.lever@oracle.com>  2024-09-20 19:31:03 -0400
commit:    c4de97f7c45434985e5dbf2d6ccc9eca676e37fe
tree:      44ff5a824220c81f9b5864f773207337e7f762b1
parent:    438f81e0e92a780b117097503599eb030b77dabe
svcrdma: Handle device removal outside of the CM event handler
Synchronously wait for all disconnects to complete to ensure the
transports have divested all hardware resources before the underlying
RDMA device can safely be removed.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
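The synchronous wait described above is not implemented in this patch; it lives in the rpcrdma notification infrastructure added earlier in this series, reached through the rpcrdma_rn_register()/rpcrdma_rn_unregister() helpers used below. As a rough sketch only -- the struct name, field names, and removal helper here are invented for illustration, not code from this patch -- the device-removal path can be pictured as a reference count over registered notifications plus a completion that the removal handler blocks on:

/* Illustrative sketch only: rn_device, rd_kref, rd_done, and
 * rn_device_remove() are invented names; the real implementation is in
 * the parent series, not in this patch.
 */
struct rn_device {
	struct kref		rd_kref;	/* one reference per registered notification */
	struct completion	rd_done;	/* signalled when the last registrant is gone */
};

static void rn_release(struct kref *kref)
{
	struct rn_device *rd = container_of(kref, struct rn_device, rd_kref);

	complete(&rd->rd_done);
}

/* Runs from the ib_client ->remove callback when an RDMA device departs */
static void rn_device_remove(struct rn_device *rd)
{
	/* Invoke each registrant's done callback here; for svcrdma that
	 * is svc_rdma_xprt_done(), which closes the transport.
	 */

	/* Drop the initial reference, then block until every transport
	 * has divested its resources and called rpcrdma_rn_unregister(),
	 * releasing its reference.
	 */
	kref_put(&rd->rd_kref, rn_release);
	wait_for_completion(&rd->rd_done);
}

Under that assumed model, the intent stated above is met because the removal handler does not return until __svc_rdma_free() has run for each affected transport and dropped its registration.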
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_transport.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f15750cacacf..581cc5ed7c0c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -339,7 +339,6 @@ static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
 		svc_xprt_enqueue(xprt);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		svc_xprt_deferred_close(xprt);
 		break;
 	default:
@@ -384,6 +383,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	return &cma_xprt->sc_xprt;
 }
 
+static void svc_rdma_xprt_done(struct rpcrdma_notification *rn)
+{
+	struct svcxprt_rdma *rdma = container_of(rn, struct svcxprt_rdma,
+						 sc_rn);
+	struct rdma_cm_id *id = rdma->sc_cm_id;
+
+	trace_svcrdma_device_removal(id);
+	svc_xprt_close(&rdma->sc_xprt);
+}
+
 /*
  * This is the xpo_recvfrom function for listening endpoints. Its
  * purpose is to accept incoming connections. The CMA callback handler
@@ -425,6 +434,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	dev = newxprt->sc_cm_id->device;
 	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
 
+	if (rpcrdma_rn_register(dev, &newxprt->sc_rn, svc_rdma_xprt_done))
+		goto errout;
+
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
@@ -580,6 +592,7 @@ static void __svc_rdma_free(struct work_struct *work)
 {
 	struct svcxprt_rdma *rdma =
 		container_of(work, struct svcxprt_rdma, sc_work);
+	struct ib_device *device = rdma->sc_cm_id->device;
 
 	/* This blocks until the Completion Queues are empty */
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -608,6 +621,7 @@ static void __svc_rdma_free(struct work_struct *work)
 	/* Destroy the CM ID */
 	rdma_destroy_id(rdma->sc_cm_id);
 
+	rpcrdma_rn_unregister(device, &rdma->sc_rn);
 	kfree(rdma);
 }
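For readers skimming past the hunks, the consumer-side pattern this patch establishes generalizes to any RDMA-backed transport. The sketch below is a hypothetical consumer: my_xprt, my_removal_done, my_xprt_setup, and my_xprt_free are invented names, and the int return from rpcrdma_rn_register() is assumed from its use in svc_rdma_accept(); only the register/unregister calls and the embedded struct rpcrdma_notification mirror the real usage in svc_rdma_accept() and __svc_rdma_free() above.

/* Hypothetical consumer of the same notification API, for illustration only */
struct my_xprt {
	struct rdma_cm_id		*cm_id;
	struct rpcrdma_notification	rn;	/* embedded, like sc_rn */
};

/* Called by the notification infrastructure, outside any CM event handler */
static void my_removal_done(struct rpcrdma_notification *rn)
{
	struct my_xprt *xprt = container_of(rn, struct my_xprt, rn);

	/* Begin tearing down the connection so the device can go away;
	 * svcrdma does this by calling svc_xprt_close().
	 */
	pr_info("device removal: closing transport %p\n", xprt);
}

static int my_xprt_setup(struct my_xprt *xprt)
{
	/* Register before the transport goes live; on failure, abort
	 * setup, as svc_rdma_accept() does with its goto errout.
	 */
	return rpcrdma_rn_register(xprt->cm_id->device, &xprt->rn,
				   my_removal_done);
}

static void my_xprt_free(struct my_xprt *xprt)
{
	struct ib_device *device = xprt->cm_id->device;

	/* Unregister only after all hardware resources are released,
	 * mirroring the rdma_destroy_id() then rpcrdma_rn_unregister()
	 * ordering in __svc_rdma_free().
	 */
	rpcrdma_rn_unregister(device, &xprt->rn);
}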