| author | Max Gurtovoy <mgurtovoy@nvidia.com> | 2022-02-09 10:54:49 +0200 |
|---|---|---|
| committer | Christoph Hellwig <hch@lst.de> | 2022-02-28 13:45:05 +0200 |
| commit | 4686af885a9168f9ec70c4063616640911c48b03 (patch) | |
| tree | 32b7e2705526d2b91e925768e7b06e0882c519c4 /drivers/nvme/host | |
| parent | 44f331a630bdc7c61de9c6760c4eec0133ee9f04 (diff) | |
nvme-rdma: add helpers for mapping/unmapping request
Introduce nvme_rdma_dma_map_req/nvme_rdma_dma_unmap_req helper functions
to improve code readability and simplify the error flow.
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/nvme/host')
| -rw-r--r-- | drivers/nvme/host/rdma.c | 111 |
1 file changed, 65 insertions, 46 deletions
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 850f84d204d0..14ec2c85db06 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1282,6 +1282,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
 	return ib_post_send(queue->qp, &wr, NULL);
 }
 
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+	if (blk_integrity_rq(rq)) {
+		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+				req->metadata_sgl->nents, rq_dma_dir(rq));
+		sg_free_table_chained(&req->metadata_sgl->sg_table,
+				      NVME_INLINE_METADATA_SG_CNT);
+	}
+
+	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+			rq_dma_dir(rq));
+	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 		struct request *rq)
 {
@@ -1293,13 +1309,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_nr_phys_segments(rq))
 		return;
 
-	if (blk_integrity_rq(rq)) {
-		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
-				req->metadata_sgl->nents, rq_dma_dir(rq));
-		sg_free_table_chained(&req->metadata_sgl->sg_table,
-				      NVME_INLINE_METADATA_SG_CNT);
-	}
-
 	if (req->use_sig_mr)
 		pool = &queue->qp->sig_mrs;
 
@@ -1308,9 +1317,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 		req->mr = NULL;
 	}
 
-	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
-			rq_dma_dir(rq));
-	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+	nvme_rdma_dma_unmap_req(ibdev, rq);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1521,22 +1528,11 @@ mr_put:
 	return -EINVAL;
 }
 
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-		struct request *rq, struct nvme_command *c)
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+		int *count, int *pi_count)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_rdma_device *dev = queue->device;
-	struct ib_device *ibdev = dev->dev;
-	int pi_count = 0;
-	int count, ret;
-
-	req->num_sge = 1;
-	refcount_set(&req->ref, 2); /* send and recv completions */
-
-	c->common.flags |= NVME_CMD_SGL_METABUF;
-
-	if (!blk_rq_nr_phys_segments(rq))
-		return nvme_rdma_set_sg_null(c);
+	int ret;
 
 	req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
 	ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@@ -1548,9 +1544,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
 					    req->data_sgl.sg_table.sgl);
 
-	count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
-			      req->data_sgl.nents, rq_dma_dir(rq));
-	if (unlikely(count <= 0)) {
+	*count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+			       req->data_sgl.nents, rq_dma_dir(rq));
+	if (unlikely(*count <= 0)) {
 		ret = -EIO;
 		goto out_free_table;
 	}
@@ -1569,16 +1565,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 		req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
 				rq->bio, req->metadata_sgl->sg_table.sgl);
-		pi_count = ib_dma_map_sg(ibdev,
-					 req->metadata_sgl->sg_table.sgl,
-					 req->metadata_sgl->nents,
-					 rq_dma_dir(rq));
-		if (unlikely(pi_count <= 0)) {
+		*pi_count = ib_dma_map_sg(ibdev,
+					  req->metadata_sgl->sg_table.sgl,
+					  req->metadata_sgl->nents,
+					  rq_dma_dir(rq));
+		if (unlikely(*pi_count <= 0)) {
 			ret = -EIO;
 			goto out_free_pi_table;
 		}
 	}
 
+	return 0;
+
+out_free_pi_table:
+	sg_free_table_chained(&req->metadata_sgl->sg_table,
+			      NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+			rq_dma_dir(rq));
+out_free_table:
+	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+	return ret;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+		struct request *rq, struct nvme_command *c)
+{
+	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_device *dev = queue->device;
+	struct ib_device *ibdev = dev->dev;
+	int pi_count = 0;
+	int count, ret;
+
+	req->num_sge = 1;
+	refcount_set(&req->ref, 2); /* send and recv completions */
+
+	c->common.flags |= NVME_CMD_SGL_METABUF;
+
+	if (!blk_rq_nr_phys_segments(rq))
+		return nvme_rdma_set_sg_null(c);
+
+	ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+	if (unlikely(ret))
+		return ret;
+
 	if (req->use_sig_mr) {
 		ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
 		goto out;
@@ -1602,23 +1632,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 		ret = nvme_rdma_map_sg_fr(queue, req, c, count);
 out:
 	if (unlikely(ret))
-		goto out_unmap_pi_sg;
+		goto out_dma_unmap_req;
 
 	return 0;
 
-out_unmap_pi_sg:
-	if (blk_integrity_rq(rq))
-		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
-				req->metadata_sgl->nents, rq_dma_dir(rq));
-out_free_pi_table:
-	if (blk_integrity_rq(rq))
-		sg_free_table_chained(&req->metadata_sgl->sg_table,
-				      NVME_INLINE_METADATA_SG_CNT);
-out_unmap_sg:
-	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
-			rq_dma_dir(rq));
-out_free_table:
-	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+	nvme_rdma_dma_unmap_req(ibdev, rq);
 	return ret;
 }
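
As a minimal, self-contained sketch of the pattern this patch applies (plain userspace C, not the driver code itself; the demo_* names and the malloc-based "mappings" are illustrative stand-ins): one mapping helper unwinds its own partial work on failure, and one unmapping helper is shared by the error path and the normal teardown path, mirroring how nvme_rdma_map_data() now delegates to nvme_rdma_dma_map_req() and nvme_rdma_dma_unmap_req().

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct demo_req {
	void *data_map;
	void *meta_map;		/* only set when metadata/integrity is present */
};

/* Analogue of nvme_rdma_dma_unmap_req(): release whatever was mapped. */
static void demo_dma_unmap_req(struct demo_req *req)
{
	free(req->meta_map);	/* free(NULL) is a no-op */
	req->meta_map = NULL;
	free(req->data_map);
	req->data_map = NULL;
}

/*
 * Analogue of nvme_rdma_dma_map_req(): map the data "SG list", then the
 * metadata one, and unwind the data mapping itself if the metadata step
 * fails, so the caller never sees a half-mapped request.
 */
static int demo_dma_map_req(struct demo_req *req, bool has_meta, bool inject_meta_failure)
{
	req->data_map = malloc(64);
	if (!req->data_map)
		return -1;

	if (has_meta) {
		req->meta_map = inject_meta_failure ? NULL : malloc(64);
		if (!req->meta_map)
			goto out_unmap_data;
	}
	return 0;

out_unmap_data:
	free(req->data_map);
	req->data_map = NULL;
	return -1;
}

int main(void)
{
	struct demo_req req = { 0 };

	/* Normal path: map, do I/O, then tear down with the shared helper. */
	if (demo_dma_map_req(&req, true, false) == 0) {
		printf("mapped data + metadata, submitting I/O\n");
		demo_dma_unmap_req(&req);
	}

	/* Error path: the map helper cleans up after itself. */
	if (demo_dma_map_req(&req, true, true) != 0)
		printf("metadata mapping failed, data mapping already unwound\n");

	return 0;
}
```

Keeping the two helpers symmetric is what allows the four-label unwind ladder in nvme_rdma_map_data() to collapse into the single out_dma_unmap_req target seen in the diff above.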