author	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-18 09:39:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-18 09:39:22 -0700
commit	9ea446352047d8350553250db51da2c73a610688 (patch)
tree	f75712fb5bfb5d2a83685023b2838f3c9ea40320 /net
parent	9dffdb38d864ae89e16ff7b3a09451270736e35b (diff)
parent	082eaa50838c6b70a8244f8b01d7ed7d686f84db (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:
 "Initial roundup of 4.6 merge window patches.

  This is the first of two pull requests. It is the smaller request, but
  touches far more different things (this is everything but what is in or
  going into staging). The pull request for the code in staging/rdma is on
  hold until after we decide what to do on the write/writev API issue and
  may be partially deferred until 4.7 as a result.

  Summary:

   - cxgb4 updates
   - nes updates
   - unification of iwarp portmapper code to core
   - add drain_cq API
   - various ib_core updates
   - minor ipoib updates
   - minor mlx4 updates
   - more significant mlx5 updates (including a minor merge conflict with
     net-next tree... merge is simple to resolve and Stephen's resolution
     was confirmed by Mellanox)
   - trivial net/9p rdma conversion
   - ocrdma RoCEv2 update
   - srpt updates"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (85 commits)
  iwpm: crash fix for large connections test
  iw_cxgb3: support for iWARP port mapping
  iw_cxgb4: remove port mapper related code
  iw_nes: remove port mapper related code
  iwcm: common code for port mapper
  net/9p: convert to new CQ API
  IB/mlx5: Add support for don't trap rules
  net/mlx5_core: Introduce forward to next priority action
  net/mlx5_core: Create anchor of last flow table
  iser: Accept arbitrary sg lists mapping if the device supports it
  mlx5: Add arbitrary sg list support
  IB/core: Add arbitrary sg_list support
  IB/mlx5: Expose correct max_fast_reg_page_list_len
  IB/mlx5: Make coding style more consistent
  IB/mlx5: Convert UMR CQ to new CQ API
  IB/ocrdma: Skip using unneeded intermediate variable
  IB/ocrdma: Skip using unneeded intermediate variable
  IB/ocrdma: Delete unnecessary variable initialisations in 11 functions
  IB/core: Documentation fix in the MAD header file
  IB/core: trivial prink cleanup.
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/9p/trans_rdma.c	86
1 file changed, 31 insertions(+), 55 deletions(-)
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 52b4a2f993f2..1852e383afd6 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -109,14 +109,13 @@ struct p9_trans_rdma {
/**
* p9_rdma_context - Keeps track of in-process WR
*
- * @wc_op: The original WR op for when the CQE completes in error.
* @busa: Bus address to unmap when the WR completes
* @req: Keeps track of requests (send)
* @rc: Keepts track of replies (receive)
*/
struct p9_rdma_req;
struct p9_rdma_context {
- enum ib_wc_opcode wc_op;
+ struct ib_cqe cqe;
dma_addr_t busa;
union {
struct p9_req_t *req;
@@ -284,9 +283,12 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
}
static void
-handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
- struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct p9_client *client = cq->cq_context;
+ struct p9_trans_rdma *rdma = client->trans;
+ struct p9_rdma_context *c =
+ container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
struct p9_req_t *req;
int err = 0;
int16_t tag;
@@ -295,7 +297,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
DMA_FROM_DEVICE);
- if (status != IB_WC_SUCCESS)
+ if (wc->status != IB_WC_SUCCESS)
goto err_out;
err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
@@ -316,21 +318,32 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
req->rc = c->rc;
p9_client_cb(client, req, REQ_STATUS_RCVD);
+ out:
+ up(&rdma->rq_sem);
+ kfree(c);
return;
err_out:
- p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
+ p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+ req, err, wc->status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
+ goto out;
}
static void
-handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
- struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct p9_client *client = cq->cq_context;
+ struct p9_trans_rdma *rdma = client->trans;
+ struct p9_rdma_context *c =
+ container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
+
ib_dma_unmap_single(rdma->cm_id->device,
c->busa, c->req->tc->size,
DMA_TO_DEVICE);
+ up(&rdma->sq_sem);
+ kfree(c);
}
static void qp_event_handler(struct ib_event *event, void *context)
@@ -339,42 +352,6 @@ static void qp_event_handler(struct ib_event *event, void *context)
event->event, context);
}
-static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
-{
- struct p9_client *client = cq_context;
- struct p9_trans_rdma *rdma = client->trans;
- int ret;
- struct ib_wc wc;
-
- ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
- while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
- struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
-
- switch (c->wc_op) {
- case IB_WC_RECV:
- handle_recv(client, rdma, c, wc.status, wc.byte_len);
- up(&rdma->rq_sem);
- break;
-
- case IB_WC_SEND:
- handle_send(client, rdma, c, wc.status, wc.byte_len);
- up(&rdma->sq_sem);
- break;
-
- default:
- pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
- c->wc_op, wc.opcode, wc.status);
- break;
- }
- kfree(c);
- }
-}
-
-static void cq_event_handler(struct ib_event *e, void *v)
-{
- p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
-}
-
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
if (!rdma)
@@ -387,7 +364,7 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
ib_dealloc_pd(rdma->pd);
if (rdma->cq && !IS_ERR(rdma->cq))
- ib_destroy_cq(rdma->cq);
+ ib_free_cq(rdma->cq);
if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma_destroy_id(rdma->cm_id);
@@ -408,13 +385,14 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
goto error;
+ c->cqe.done = recv_done;
+
sge.addr = c->busa;
sge.length = client->msize;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
- c->wc_op = IB_WC_RECV;
- wr.wr_id = (unsigned long) c;
+ wr.wr_cqe = &c->cqe;
wr.sg_list = &sge;
wr.num_sge = 1;
return ib_post_recv(rdma->qp, &wr, &bad_wr);
@@ -499,13 +477,14 @@ dont_need_post_recv:
goto send_error;
}
+ c->cqe.done = send_done;
+
sge.addr = c->busa;
sge.length = c->req->tc->size;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
- c->wc_op = IB_WC_SEND;
- wr.wr_id = (unsigned long) c;
+ wr.wr_cqe = &c->cqe;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
wr.sg_list = &sge;
@@ -642,7 +621,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct p9_trans_rdma *rdma;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
- struct ib_cq_init_attr cq_attr = {};
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
@@ -695,13 +673,11 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
goto error;
/* Create the Completion Queue */
- cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
- rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
- cq_event_handler, client,
- &cq_attr);
+ rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
+ opts.sq_depth + opts.rq_depth + 1,
+ 0, IB_POLL_SOFTIRQ);
if (IS_ERR(rdma->cq))
goto error;
- ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
/* Create the Protection Domain */
rdma->pd = ib_alloc_pd(rdma->cm_id->device);
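
The net/9p hunks above are a straightforward instance of the new CQ API: the per-WR context embeds a struct ib_cqe, the work request carries wr_cqe instead of a casted wr_id cookie, and the CQ is allocated with ib_alloc_cq() so the core does the notify/poll/dispatch that cq_comp_handler() used to open-code. A minimal sketch of that pattern, with hypothetical demo_* names standing in for the 9p-specific pieces (only the ib_* calls are the real verbs API used in the patch), would look like:

/*
 * Hedged sketch of the new CQ API consumer pattern.  The demo_* names are
 * illustrative only; error handling and DMA mapping are elided.
 */
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_ctx {
	struct ib_cqe cqe;		/* recovered via container_of() in ->done */
	dma_addr_t busa;
};

static void demo_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct demo_ctx *c = container_of(wc->wr_cqe, struct demo_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("demo: WR failed, status %d\n", wc->status);
	/* a real consumer would unmap c->busa here */
	kfree(c);
}

static int demo_post_recv(struct ib_qp *qp, struct demo_ctx *c,
			  struct ib_sge *sge)
{
	struct ib_recv_wr wr, *bad_wr;

	c->cqe.done = demo_done;	/* replaces the old wr_id cookie + wc_op switch */
	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = sge;
	wr.num_sge = 1;
	return ib_post_recv(qp, &wr, &bad_wr);
}

/*
 * CQ setup: ib_alloc_cq() takes over ib_req_notify_cq()/ib_poll_cq(), so the
 * driver-private completion and event handlers removed by the patch are no
 * longer needed.  Teardown is ib_free_cq() instead of ib_destroy_cq().
 */
static struct ib_cq *demo_alloc_cq(struct ib_device *dev, void *priv, int depth)
{
	return ib_alloc_cq(dev, priv, depth, 0, IB_POLL_SOFTIRQ);
}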