| author | Xi Wang <wangxi11@huawei.com> | 2021-05-21 17:29:51 +0800 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2021-05-28 20:13:57 -0300 |
| commit | 7b0006db6800da4c05883584befa46502d85dede | |
| tree | df6f449dcefdfe4504d35b8e5702823dcb61a11c /drivers/infiniband/hw/hns/hns_roce_srq.c | |
| parent | 9ecf6ac17c321df396df283a771c29663e44871a | |
RDMA/hns: Optimize the base address table config for MTR
The base address table is allocated by the DMA allocator, so its size is
always aligned up to PAGE_SIZE. If a fixed size is used to allocate the
table, the number of base address entries credited to the table will be
smaller than the number that can actually be stored.
Link: https://lore.kernel.org/r/1621589395-2435-2-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
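To see why the fixed size wastes capacity, consider a kernel built with 64 KiB pages: the DMA allocator still hands back a whole PAGE_SIZE-aligned buffer, but sizing the entry count from the fixed 4 KiB hardware page leaves most of it unused. A minimal user-space sketch of the arithmetic (the 64 KiB system page and the 8-byte entry size are assumptions for illustration, not values taken from this patch):

```c
#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12	/* driver's fixed 4 KiB hardware page shift */
#define SYS_PAGE_SHIFT    16	/* e.g. a 64 KiB-page arm64 kernel (assumed) */
#define BA_ENTRY_SIZE     8	/* one 64-bit base address per entry (assumed) */

int main(void)
{
	/* The DMA allocation is PAGE_SIZE-aligned, so the table really
	 * holds "available" entries; sizing it by the fixed hardware
	 * page credits it with only "credited". */
	unsigned long credited  = (1UL << HNS_HW_PAGE_SHIFT) / BA_ENTRY_SIZE;
	unsigned long available = (1UL << SYS_PAGE_SHIFT) / BA_ENTRY_SIZE;

	printf("entries credited: %lu, entries available: %lu\n",
	       credited, available);
	return 0;
}
```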
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_srq.c')
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_srq.c | 10 |
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 546d182c577a..c842210f7c47 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -167,14 +167,14 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 
 	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
 
-	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
 	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
 					srq->idx_que.entry_shift);
 	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
 	buf_attr.region_count = 1;
 
 	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
-				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
 				  udata, addr);
 	if (ret) {
 		ibdev_err(ibdev,
@@ -222,15 +222,15 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
 						      HNS_ROCE_SGE_SIZE *
 						      srq->max_gs)));
 
-	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
 	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
 							 srq->wqe_shift);
 	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
 	buf_attr.region_count = 1;
 
 	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
-				  hr_dev->caps.srqwqe_ba_pg_sz +
-				  HNS_HW_PAGE_SHIFT, udata, addr);
+				  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
+				  udata, addr);
 	if (ret)
 		ibdev_err(ibdev, "failed to alloc SRQ buf mtr, ret = %d.\n",
 			  ret);
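Both hunks make the same substitution: the shift added to the ba_pg_sz/buf_pg_sz capability fields changes from the fixed HNS_HW_PAGE_SHIFT (4 KiB) to the kernel's PAGE_SHIFT, so the page size reported to the MTR layer matches what the DMA allocator actually returns. On 4 KiB-page kernels the two shifts are equal and the patch is a no-op; the gain appears on larger page sizes. A sketch of the before/after base-address page size (the PAGE_SHIFT of 16 and the ba_pg_sz of 0 are assumptions for illustration; the real values come from hr_dev->caps):

```c
#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12	/* fixed shift the patch replaces */
#define PAGE_SHIFT        16	/* assumed 64 KiB kernel page */

int main(void)
{
	unsigned int ba_pg_sz = 0;	/* stand-in for hr_dev->caps.idx_ba_pg_sz */

	/* Page size the MTR was told about before the patch ... */
	unsigned long old_ba_page = 1UL << (ba_pg_sz + HNS_HW_PAGE_SHIFT);
	/* ... and the PAGE_SIZE-aligned size the table really has. */
	unsigned long new_ba_page = 1UL << (ba_pg_sz + PAGE_SHIFT);

	printf("BA table page: %lu -> %lu bytes\n", old_ba_page, new_ba_page);
	return 0;
}
```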