author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-14 13:35:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-14 13:35:05 -0700
commit     b292fb80bb44726ac1055d443d951a3058fc8263 (patch)
tree       439131762deba27049f10c7177dd890c44eaa6a2 /drivers/infiniband
parent     689f891c980949d3eb64f61651db53cb347e0a13 (diff)
parent     1bdab400af5954932714e68ab3df0187a92916cb (diff)
Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull more rdma updates from Doug Ledford:
"This merge window was the first where Huawei had to try and coordinate
their patches between their net driver and their new roce driver
(similar to mlx4 and mlx5).

They didn't do horribly, but there were some issues (and we knew that
because they simply didn't know what to do in the beginning). As a
result, I had a set of patches that depended on some patches that
normally would have come to you via Dave's tree. Those patches have
been on netdev@ for a while, so I got Dave to give me his approval to
send them to you. As such, the other 29 patches I had behind them are
also now ready to go.

This catches the hns and hns-roce drivers up to current, and for
future patches we are working with them to get them up to speed on how
to do joint driver development so that they don't have these sorts of
cross tree dependency issues again. BTW, Dave gave me permission to
add his Acked-by: to the patches against the net tree, but I've had
this branch through 0day (but not linux-next since it was off by
itself) and I didn't want to rebase the series just to add Dave's ack
for the 8 patches in the net area.

Updates to the hns drivers:
- Small patch set for hns net driver that the roce patches depend on
- Various fixes to the hns-roce driver
- Add connection manager support to the hns-roce driver"
* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (36 commits)
IB/hns: Fix for removal of redundant code
IB/hns: Delete the redundant lines in hns_roce_v1_m_qp()
IB/hns: Fix the bug when platform_get_resource() exec fail
IB/hns: Update the rq head when modify qp state
IB/hns: Cq has not been freed
IB/hns: Validate mtu when modified qp
IB/hns: Some items of qpc need to take user param
IB/hns: The Ack timeout need a lower limit value
IB/hns: Return bad wr while post send failed
IB/hns: Fix bug of memory leakage for registering user mr
IB/hns: Modify the init of iboe lock
IB/hns: Optimize code of aeq and ceq interrupt handle and fix the bug of qpn
IB/hns: Delete the sqp_start from the structure hns_roce_caps
IB/hns: Fix bug of clear hem
IB/hns: Remove unused parameter named qp_type
IB/hns: Simplify function of pd alloc and qp alloc
IB/hns: Fix bug of using uninit refcount and free
IB/hns: Remove parameters of resize cq
IB/hns: Remove unused parameters in some functions
IB/hns: Add node_guid definition to the bindings document
...
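
Two of the fixes above ("IB/hns: Cq has not been freed" and "IB/hns: Fix bug of
using uninit refcount and free") share one idiom, visible in the hns_roce_cq.c
hunks below: pair a reference count with a completion so that the destroy path
blocks until the last event-handler reference has been dropped. A minimal
sketch of the pattern in kernel-style C (the struct and function names here
are illustrative, not the driver's):

	#include <linux/atomic.h>
	#include <linux/completion.h>

	struct obj {
		atomic_t refcount;
		struct completion free;
	};

	static void obj_init(struct obj *o)
	{
		atomic_set(&o->refcount, 1);	/* creator holds one reference */
		init_completion(&o->free);
	}

	/* event path: hold a reference while the handler runs */
	static void obj_event(struct obj *o)
	{
		atomic_inc(&o->refcount);
		/* ... dispatch the event ... */
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->free);
	}

	/* destroy path: drop the initial reference, then wait out handlers */
	static void obj_destroy(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->free);
		wait_for_completion(&o->free);
		/* no handler can still be touching the object here */
	}

In the patch itself, hns_roce_cq_alloc() plays the obj_init() role and
hns_roce_free_cq() plays obj_destroy().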
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c     |  23
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h |  18
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.c     | 146
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.h     |   4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c    |  76
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h    |   4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  | 293
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |   9
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c   |  36
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c     |   3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c     |  22
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c     |  67
12 files changed, 385 insertions(+), 316 deletions(-)
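
Much of the hns_roce_hem.c/hns_roce_hw_v1.c churn below moves the HEM "clear"
operation behind the new per-hardware-version hw->clear_hem hook ("IB/hns: Fix
bug of clear hem"), and the busy-wait on the BT command register moves with
it. The waiting idiom (poll a hardware bit until it drops, or give up when a
jiffies deadline passes) reduces to roughly the following. A sketch only: the
three constants are the ones the patch relocates into hns_roce_hem.h, the
helper name is made up, and the locking around the real loop is omitted:

	#include <linux/delay.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	#define HW_SYNC_TIMEOUT_MSECS		500
	#define HW_SYNC_SLEEP_TIME_INTERVAL	20
	#define BT_CMD_SYNC_SHIFT		31

	/* Poll the BT command register until the HW_SYNC bit clears. */
	static int wait_for_bt_cmd_idle(void __iomem *bt_cmd)
	{
		unsigned long end = jiffies + msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS);

		while (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!time_before(jiffies, end))
				return -EBUSY;	/* hardware never went idle */
			msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
		}

		return 0;	/* safe to issue the next BT command */
	}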
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 875597b0e69c..097365932b09 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 			     struct hns_roce_mtt *hr_mtt,
 			     struct hns_roce_uar *hr_uar,
-			     struct hns_roce_cq *hr_cq, int vector,
-			     int collapsed)
+			     struct hns_roce_cq *hr_cq, int vector)
 {
 	struct hns_roce_cmd_mailbox *mailbox = NULL;
 	struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	hr_cq->cons_index = 0;
 	hr_cq->uar = hr_uar;
 
+	atomic_set(&hr_cq->refcount, 1);
+	init_completion(&hr_cq->free);
+
 	return 0;
 
 err_radix:
@@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
 	/* Waiting interrupt process procedure carried out */
 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
 
+	/* wait for all interrupt processed */
+	if (atomic_dec_and_test(&hr_cq->refcount))
+		complete(&hr_cq->free);
+	wait_for_completion(&hr_cq->free);
+
 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
 	spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
 	hr_cq->ib_cq.cqe = cq_entries - 1;
 
-	mutex_init(&hr_cq->resize_mutex);
 	spin_lock_init(&hr_cq->lock);
-	hr_cq->hr_resize_buf = NULL;
-	hr_cq->resize_umem = NULL;
 
 	if (context) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	}
 
 	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
-				uar, hr_cq, vector, 0);
+	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+				hr_cq, vector);
 	if (ret) {
 		dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
 		goto err_mtt;
@@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	if (context) {
 		if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
 			ret = -EFAULT;
-			goto err_mtt;
+			goto err_cqc;
 		}
 	}
 
 	return &hr_cq->ib_cq;
 
+err_cqc:
+	hns_roce_free_cq(hr_dev, hr_cq);
+
 err_mtt:
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
 	if (context)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ea735800eb18..341731553a60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,7 +62,7 @@
 #define HNS_ROCE_AEQE_OF_VEC_NUM	1
 
 /* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT		29
+#define HNS_ROCE_SL_SHIFT		28
 #define HNS_ROCE_TCLASS_SHIFT		20
 #define HNS_ROCE_FLOW_LABLE_MASK	0xfffff
@@ -74,7 +74,9 @@
 #define MR_TYPE_DMA			0x03
 
 #define PKEY_ID				0xffff
+#define GUID_LEN			8
 #define NODE_DESC_SIZE			64
+#define DB_REG_OFFSET			0x1000
 
 #define SERV_TYPE_RC			0
 #define SERV_TYPE_RD			1
@@ -282,20 +284,11 @@ struct hns_roce_cq_buf {
 	struct hns_roce_mtt hr_mtt;
 };
 
-struct hns_roce_cq_resize {
-	struct hns_roce_cq_buf	hr_buf;
-	int			cqe;
-};
-
 struct hns_roce_cq {
 	struct ib_cq		ib_cq;
 	struct hns_roce_cq_buf	hr_buf;
-	/* pointer to store information after resize*/
-	struct hns_roce_cq_resize	*hr_resize_buf;
 	spinlock_t		lock;
-	struct mutex		resize_mutex;
 	struct ib_umem		*umem;
-	struct ib_umem		*resize_umem;
 	void (*comp)(struct hns_roce_cq *);
 	void (*event)(struct hns_roce_cq *, enum hns_roce_event);
@@ -408,6 +401,7 @@ struct hns_roce_qp {
 	u32			buff_size;
 	struct mutex		mutex;
 	u8			port;
+	u8			phy_port;
 	u8			sl;
 	u8			resp_depth;
 	u8			state;
@@ -471,7 +465,6 @@ struct hns_roce_caps {
 	u32		max_rq_desc_sz;	/* 64 */
 	int		max_qp_init_rdma;
 	int		max_qp_dest_rdma;
-	int		sqp_start;
 	int		num_cqs;
 	int		max_cqes;
 	int		reserved_cqs;
@@ -512,6 +505,8 @@ struct hns_roce_hw {
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
 			  dma_addr_t dma_handle, int nent, u32 vector);
+	int (*clear_hem)(struct hns_roce_dev *hr_dev,
+			 struct hns_roce_hem_table *table, int obj);
 	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@ struct hns_roce_dev {
 	struct hns_roce_uar     priv_uar;
 	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
 	spinlock_t		sm_lock;
-	spinlock_t		cq_db_lock;
 	spinlock_t		bt_cmd_lock;
 	struct hns_roce_ib_iboe iboe;
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fecf2f1..21e21b03cfb5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
 {
 	struct device *dev = &hr_dev->pdev->dev;
 
-	qpn = roce_get_field(aeqe->event.qp_event.qp,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
 	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
 	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
 			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
 	default:
 		break;
 	}
-
-	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-			  roce_get_field(aeqe->asyn,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
 }
 
 static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
 {
 	struct device *dev = &hr_dev->pdev->dev;
 
-	qpn = roce_get_field(aeqe->event.qp_event.qp,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
 	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
 	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
 			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
 	default:
 		break;
 	}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_aeqe *aeqe,
+				   int event_type)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	int phy_port;
+	int qpn;
+
+	qpn = roce_get_field(aeqe->event.qp_event.qp,
+			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+	phy_port = roce_get_field(aeqe->event.qp_event.qp,
+				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+	if (qpn <= 1)
+		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+	switch (event_type) {
+	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+			      "QP %d, phy_port %d.\n", qpn, phy_port);
+		break;
+	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+		hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+		hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+		break;
+	default:
+		break;
+	}
+
+	hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_aeqe *aeqe,
+				   int event_type)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	u32 cqn;
+
+	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+	switch (event_type) {
+	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+		break;
+	default:
+		break;
+	}
 
-	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-				  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-			  roce_get_field(aeqe->asyn,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+	hns_roce_cq_event(hr_dev, cqn, event_type);
 }
 
 static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_aeqe *aeqe;
 	int aeqes_found = 0;
-	int qpn = 0;
+	int event_type;
 
 	while ((aeqe = next_aeqe_sw(eq))) {
 		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 		/* Memory barrier */
 		rmb();
 
-		switch (roce_get_field(aeqe->asyn,
-			HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-			HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+		event_type = roce_get_field(aeqe->asyn,
+					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+		switch (event_type) {
 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
 			dev_warn(dev, "PATH MIG not supported\n");
 			break;
@@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 			dev_warn(dev, "PATH MIG failed\n");
 			break;
 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
-			dev_warn(dev, "qpn = 0x%lx\n",
-				 roce_get_field(aeqe->event.qp_event.qp,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
-			hns_roce_qp_event(hr_dev,
-				roce_get_field(aeqe->event.qp_event.qp,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-				roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
-			hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
-			break;
 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
-			hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+			hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 			dev_warn(dev, "SRQ not support!\n");
 			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
-			dev_warn(dev, "CQ 0x%lx access err.\n",
-				 roce_get_field(aeqe->event.cq_event.cq,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-			hns_roce_cq_event(hr_dev,
-				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-				roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-			dev_warn(dev, "CQ 0x%lx overflow\n",
-				 roce_get_field(aeqe->event.cq_event.cq,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-			hns_roce_cq_event(hr_dev,
-				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-				roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
-			dev_warn(dev, "CQ ID invalid.\n");
-			hns_roce_cq_event(hr_dev,
-				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-				roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+			hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
 			dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 				HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
 			break;
 		default:
-			dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
-				 roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
-				 eq->eqn, eq->cons_index);
+			dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+				 event_type, eq->eqn, eq->cons_index);
 			break;
 		};
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
index fe4388191a3c..c6d212d12e03 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -107,6 +107,10 @@ struct hns_roce_aeqe {
 #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M   \
 	(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
 
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M   \
+	(((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M   \
 	(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index d53d64362389..250d8f280390 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,10 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
 
-#define HW_SYNC_TIMEOUT_MSECS		500
-#define HW_SYNC_SLEEP_TIME_INTERVAL	20
-
 #define HNS_ROCE_HEM_ALLOC_SIZE		(1 << 17)
 #define HNS_ROCE_TABLE_CHUNK_SIZE	(1 << 17)
 
 #define DMA_ADDR_T_SHIFT		12
-#define BT_CMD_SYNC_SHIFT		31
 #define BT_BA_SHIFT			32
 
 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
-			      struct hns_roce_hem_table *table,
-			      unsigned long obj)
-{
-	struct device *dev = &hr_dev->pdev->dev;
-	unsigned long end = 0;
-	unsigned long flags;
-	void __iomem *bt_cmd;
-	uint32_t bt_cmd_val[2];
-	u32 bt_cmd_h_val = 0;
-	int ret = 0;
-
-	switch (table->type) {
-	case HEM_TYPE_QPC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
-		break;
-	case HEM_TYPE_MTPT:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-			       HEM_TYPE_MTPT);
-		break;
-	case HEM_TYPE_CQC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
-		break;
-	case HEM_TYPE_SRQC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-			       HEM_TYPE_SRQC);
-		break;
-	default:
-		return ret;
-	}
-	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
-		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
-	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
-	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
-		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-
-	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
-	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
-	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
-	while (1) {
-		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
-			if (!(time_before(jiffies, end))) {
-				dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
-				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
-						       flags);
-				return -EBUSY;
-			}
-		} else {
-			break;
-		}
-		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
-	}
-
-	bt_cmd_val[0] = 0;
-	bt_cmd_val[1] = bt_cmd_h_val;
-	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
-	return ret;
-}
-
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj)
 {
@@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 	if (--table->hem[i]->refcount == 0) {
 		/* Clear HEM base address */
-		if (hns_roce_clear_hem(hr_dev, table, obj))
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj))
 			dev_warn(dev, "Clear HEM base address failed.\n");
 
 		hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 	for (i = 0; i < table->num_hem; ++i)
 		if (table->hem[i]) {
-			if (hns_roce_clear_hem(hr_dev, table,
+			if (hr_dev->hw->clear_hem(hr_dev, table,
 			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
 				dev_err(dev, "Clear HEM base address failed.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index ad6617588fba..435748858252 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,6 +34,10 @@
 #ifndef _HNS_ROCE_HEM_H
 #define _HNS_ROCE_HEM_H
 
+#define HW_SYNC_TIMEOUT_MSECS		500
+#define HW_SYNC_SLEEP_TIME_INTERVAL	20
+#define BT_CMD_SYNC_SHIFT		31
+
 enum {
 	/* MAP HEM(Hardware Entry Memory) */
 	HEM_TYPE_QPC = 0,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 399f5dedaf2d..71232e5fabf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	u32 ind = 0;
 	int ret = 0;
 
-	spin_lock_irqsave(&qp->sq.lock, flags);
+	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+		ibqp->qp_type != IB_QPT_RC)) {
+		dev_err(dev, "un-supported QP type\n");
+		*bad_wr = NULL;
+		return -EOPNOTSUPP;
+	}
 
+	spin_lock_irqsave(&qp->sq.lock, flags);
 	ind = qp->sq_next_wqe;
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			roce_set_field(ud_sq_wqe->u32_36,
 				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
 				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
-				       hns_get_gid_index(hr_dev, qp->port,
+				       hns_get_gid_index(hr_dev, qp->phy_port,
 							 ah->av.gid_index));
 			roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				(wr->send_flags & IB_SEND_FENCE ?
 				(cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
 
-			wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
-			       sizeof(struct hns_roce_wqe_ctrl_seg);
+			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
 
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 			}
 			ctrl->flag |= cpu_to_le32(ps_opcode);
-			wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
-			       sizeof(struct hns_roce_wqe_raddr_seg);
+			wqe += sizeof(struct hns_roce_wqe_raddr_seg);
 
 			dseg = wqe;
 			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					memcpy(wqe, ((void *) (uintptr_t)
 					       wr->sg_list[i].addr),
 					       wr->sg_list[i].length);
-					wqe = (struct hns_roce_wqe_raddr_seg *)
-					       wqe + wr->sg_list[i].length;
+					wqe += wr->sg_list[i].length;
 				}
 				ctrl->flag |= HNS_ROCE_WQE_INLINE;
 			} else {
@@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					 HNS_ROCE_WQE_SGE_NUM_BIT);
 			}
 			ind++;
-		} else {
-			dev_dbg(dev, "unSupported QP type\n");
-			break;
 		}
 	}
 
@@ -285,7 +285,7 @@ out:
 			SQ_DOORBELL_U32_4_SQ_HEAD_S,
 			(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
 		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
-			       SQ_DOORBELL_U32_4_PORT_S, qp->port);
+			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
 		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
 			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
 		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@ out:
 		/* SW update GSI rq header */
 		reg_val = roce_read(to_hr_dev(ibqp->device),
 				    ROCEE_QP1C_CFG3_0_REG +
-				    QP1C_CFGN_OFFSET * hr_qp->port);
+				    QP1C_CFGN_OFFSET * hr_qp->phy_port);
 		roce_set_field(reg_val,
 			       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
 			       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
 			       hr_qp->rq.head);
 		roce_write(to_hr_dev(ibqp->device),
 			   ROCEE_QP1C_CFG3_0_REG +
-			   QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+			   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
 	} else {
 		rq_db.u32_4 = 0;
 		rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
 	}
 }
 
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+	int ret;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.qpc_buf.buf)
+		return -ENOMEM;
+
+	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.mtpt_buf.buf) {
+		ret = -ENOMEM;
+		goto err_failed_alloc_mtpt_buf;
+	}
+
+	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.cqc_buf.buf) {
+		ret = -ENOMEM;
+		goto err_failed_alloc_cqc_buf;
+	}
+
+	return 0;
+
+err_failed_alloc_cqc_buf:
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+	return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
 /**
  * hns_roce_v1_reset - reset RoCE
  * @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
 	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
 	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
-	caps->sqp_start = 0;
 	caps->reserved_lkey = 0;
 	caps->reserved_pds = 0;
 	caps->reserved_mrws = 1;
@@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
 
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
 
+	ret = hns_roce_bt_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "bt init failed!\n");
+		goto error_failed_bt_init;
+	}
+
 	return 0;
 
+error_failed_bt_init:
+	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+	hns_roce_raq_free(hr_dev);
+
 error_failed_raq_init:
 	hns_roce_db_free(hr_dev);
 	return ret;
@@ -953,6 +1022,7 @@ error_failed_raq_init:
 
 void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 {
+	hns_roce_bt_free(hr_dev);
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
 	hns_roce_raq_free(hr_dev);
 	hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
 	return get_sw_cqe(hr_cq, hr_cq->cons_index);
 }
 
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
-			   spinlock_t *doorbell_lock)
-
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
 	u32 doorbell[2];
 
@@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 		 */
 		wmb();
 
-		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-				      &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
 	}
 }
 
@@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		/* SQ conrespond to CQE */
 		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
 						CQE_BYTE_4_WQE_INDEX_M,
-						CQE_BYTE_4_WQE_INDEX_S));
+						CQE_BYTE_4_WQE_INDEX_S)&
+						((*cur_qp)->sq.wqe_cnt-1));
 		switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
 		case HNS_ROCE_WQE_OPCODE_SEND:
 			wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 			break;
 	}
 
-	if (npolled) {
-		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-				      &to_hr_dev(ibcq->device)->cq_db_lock);
-	}
+	if (npolled)
+		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
 
 	spin_unlock_irqrestore(&hr_cq->lock, flags);
 
@@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	return ret;
 }
 
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table, int obj)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+	unsigned long end = 0, flags = 0;
+	uint32_t bt_cmd_val[2] = {0};
+	void __iomem *bt_cmd;
+	u64 bt_ba = 0;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	switch (table->type) {
+	case HEM_TYPE_QPC:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+		bt_ba = priv->bt_table.qpc_buf.map >> 12;
+		break;
+	case HEM_TYPE_MTPT:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+		break;
+	case HEM_TYPE_CQC:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+		bt_ba = priv->bt_table.cqc_buf.map >> 12;
+		break;
+	case HEM_TYPE_SRQC:
+		dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+		return -EINVAL;
+	default:
+		return 0;
+	}
+	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+		ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+	while (1) {
+		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+			if (!(time_before(jiffies, end))) {
+				dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+					flags);
+				return -EBUSY;
+			}
+		} else {
+			break;
+		}
+		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+	}
+
+	bt_cmd_val[0] = (uint32_t)bt_ba;
+	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+		ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+	return 0;
+}
+
 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
 				 struct hns_roce_mtt *mtt,
 				 enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
 		       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
 	roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
-		       QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+		       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
 	roce_set_bit(context->qp1c_bytes_16,
 		     QP1C_BYTES_16_SIGNALING_TYPE_S,
 		     hr_qp->sq_signal_bits);
-	roce_set_bit(context->qp1c_bytes_16,
-		     QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
-		     hr_qp->sq_signal_bits);
 	roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 1);
 	roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 
 	/* Copy context to QP1C register */
 	addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
-		hr_qp->port * sizeof(*context));
+		hr_qp->phy_port * sizeof(*context));
 
 	writel(context->qp1c_bytes_4, addr);
 	writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	writel(context->qp1c_bytes_28, addr + 6);
 	writel(context->qp1c_bytes_32, addr + 7);
 	writel(context->cur_sq_wqe_ba_l, addr + 8);
+	writel(context->qp1c_bytes_40, addr + 9);
 	}
 
 	/* Modify QP1C status */
 	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-			    hr_qp->port * sizeof(*context));
+			    hr_qp->phy_port * sizeof(*context));
 	roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
 		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
 	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-		   hr_qp->port * sizeof(*context), reg_val);
+		   hr_qp->phy_port * sizeof(*context), reg_val);
 
 	hr_qp->state = new_state;
 	if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_qp_context *context;
-	struct hns_roce_rq_db rq_db;
 	dma_addr_t dma_handle_2 = 0;
 	dma_addr_t dma_handle = 0;
 	uint32_t doorbell[2] = {0};
 	int rq_pa_start = 0;
-	u32 reg_val = 0;
 	u64 *mtts_2 = NULL;
 	int ret = -EINVAL;
 	u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 
 		roce_set_field(context->qpc_bytes_68,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
-			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+			       hr_qp->rq.head);
 		roce_set_field(context->qpc_bytes_68,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-			       hr_qp->port);
+			       hr_qp->phy_port);
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_SL_M,
 			       QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		roce_set_bit(context->qpc_bytes_140,
 			     QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
 
-		roce_set_field(context->qpc_bytes_144,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-			       attr->qp_state);
-
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
 			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
-			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+			       attr->retry_cnt);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
-			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+			       attr->rnr_retry);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_LSN_M,
 			       QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
 			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
 			       attr->retry_cnt);
-		roce_set_field(context->qpc_bytes_156,
-			       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
-			       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
-			       attr->timeout);
+		if (attr->timeout < 0x12) {
+			dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+				 attr->timeout);
+			roce_set_field(context->qpc_bytes_156,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+				       0x12);
+		} else {
+			roce_set_field(context->qpc_bytes_156,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+				       attr->timeout);
+		}
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
 			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-			       hr_qp->port);
+			       hr_qp->phy_port);
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_SL_M,
 			       QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@
 			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
 			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
 			       0);
-	} else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+	} else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
-		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
-		roce_set_field(context->qpc_bytes_144,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-			       attr->qp_state);
-
-	} else {
-		dev_err(dev, "not support this modify\n");
+		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+		dev_err(dev, "not support this status migration\n");
 		goto out;
 	}
 
@@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
 		/* Memory barrier */
 		wmb();
-		if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-			/* SW update GSI rq header */
-			reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-					    QP1C_CFGN_OFFSET * hr_qp->port);
-			roce_set_field(reg_val,
-				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
-				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
-				       hr_qp->rq.head);
-			roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-				   QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
-		} else {
-			rq_db.u32_4 = 0;
-			rq_db.u32_8 = 0;
-
-			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
-				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
-				       hr_qp->rq.head);
-			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
-				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
-			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
-				       RQ_DOORBELL_U32_8_CMD_S, 1);
-			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
-				     1);
-			doorbell[0] = rq_db.u32_4;
-			doorbell[1] = rq_db.u32_8;
-
-			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+		roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+			       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+			       RQ_DOORBELL_U32_8_CMD_S, 1);
+		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+		if (ibqp->uobject) {
+			hr_qp->rq.db_reg_l = hr_dev->reg_base +
+				     ROCEE_DB_OTHERS_L_0_REG +
+				     DB_REG_OFFSET * hr_dev->priv_uar.index;
 		}
+
+		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
 	}
 
 	hr_qp->state = new_state;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		hr_qp->resp_depth = attr->max_dest_rd_atomic;
-	if (attr_mask & IB_QP_PORT)
-		hr_qp->port = (attr->port_num - 1);
+	if (attr_mask & IB_QP_PORT) {
+		hr_qp->port = attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+	}
 
 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
 		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
 	.set_mtu = hns_roce_v1_set_mtu,
 	.write_mtpt = hns_roce_v1_write_mtpt,
 	.write_cqc = hns_roce_v1_write_cqc,
+	.clear_hem = hns_roce_v1_clear_hem,
 	.modify_qp = hns_roce_v1_modify_qp,
 	.query_qp = hns_roce_v1_query_qp,
 	.destroy_qp = hns_roce_v1_destroy_qp,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 316b592b1636..539b0a3b92b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -102,6 +102,8 @@
 #define HNS_ROCE_V1_EXT_ODB_ALFUL	\
 	(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
 
+#define HNS_ROCE_BT_RSV_BUF_SIZE	(1 << 17)
+
 #define HNS_ROCE_ODB_POLL_MODE		0
 
 #define HNS_ROCE_SDB_NORMAL_MODE	0
@@ -971,9 +973,16 @@ struct hns_roce_db_table {
 	struct hns_roce_ext_db *ext_db;
 };
 
+struct hns_roce_bt_table {
+	struct hns_roce_buf_list qpc_buf;
+	struct hns_roce_buf_list mtpt_buf;
+	struct hns_roce_buf_list cqc_buf;
+};
+
 struct hns_roce_v1_priv {
 	struct hns_roce_db_table  db_table;
 	struct hns_roce_raq_table raq_table;
+	struct hns_roce_bt_table  bt_table;
 };
 
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index f64f0dde9a88..764e35a54457 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	props->max_qp = hr_dev->caps.num_qps;
 	props->max_qp_wr = hr_dev->caps.max_wqes;
 	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
-				  IB_DEVICE_RC_RNR_NAK_GEN |
-				  IB_DEVICE_LOCAL_DMA_LKEY;
+				  IB_DEVICE_RC_RNR_NAK_GEN;
 	props->max_sge = hr_dev->caps.max_sq_sg;
 	props->max_sge_rd = 1;
 	props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	return 0;
 }
 
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+					      u8 port_num)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct net_device *ndev;
+
+	if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+		return NULL;
+
+	rcu_read_lock();
+
+	ndev = hr_dev->iboe.netdevs[port_num - 1];
+	if (ndev)
+		dev_hold(ndev);
+
+	rcu_read_unlock();
+	return ndev;
+}
+
 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 			       struct ib_port_attr *props)
 {
@@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	struct device *dev = &hr_dev->pdev->dev;
 
 	iboe = &hr_dev->iboe;
+	spin_lock_init(&iboe->lock);
 
 	ib_dev = &hr_dev->ib_dev;
 	strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->query_port	= hns_roce_query_port;
 	ib_dev->modify_port	= hns_roce_modify_port;
 	ib_dev->get_link_layer	= hns_roce_get_link_layer;
+	ib_dev->get_netdev	= hns_roce_get_netdev;
 	ib_dev->query_gid	= hns_roce_query_gid;
 	ib_dev->query_pkey	= hns_roce_query_pkey;
 	ib_dev->alloc_ucontext	= hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		goto error_failed_setup_mtu_gids;
 	}
 
-	spin_lock_init(&iboe->lock);
-
 	iboe->nb.notifier_call = hns_roce_netdev_event;
 	ret = register_netdevice_notifier(&iboe->nb);
 	if (ret) {
@@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
 	if (IS_ERR(hr_dev->reg_base))
 		return PTR_ERR(hr_dev->reg_base);
 
+	/* read the node_guid of IB device from the DT or ACPI */
+	ret = device_property_read_u8_array(dev, "node-guid",
+					    (u8 *)&hr_dev->ib_dev.node_guid,
+					    GUID_LEN);
+	if (ret) {
+		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+		return ret;
+	}
+
 	/* get the RoCE associated ethernet ports or netdevices */
 	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
 		if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	struct device *dev = &hr_dev->pdev->dev;
 
 	spin_lock_init(&hr_dev->sm_lock);
-	spin_lock_init(&hr_dev->cq_db_lock);
 	spin_lock_init(&hr_dev->bt_cmd_lock);
 
 	ret = hns_roce_init_uar_table(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 59f5e2be046b..fb87883ead34 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
 		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
 			mr->umem->page_size);
+		ret = -EINVAL;
+		goto err_umem;
 	}
 
 	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
 		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
 			length);
+		ret = -EINVAL;
 		goto err_umem;
 	}
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 16271b5bd170..05db7d59812a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,19 +35,7 @@
 
 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
-	unsigned long pd_number;
-	int ret = 0;
-
-	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
-	if (ret == -1) {
-		dev_err(dev, "alloc pdn from pdbitmap failed\n");
-		return -ENOMEM;
-	}
-
-	*pdn = pd_number;
-
-	return 0;
+	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
 }
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 	if (ret == -1)
 		return -ENOMEM;
 
-	uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+	if (uar->index > 0)
+		uar->index = (uar->index - 1) %
+			     (hr_dev->caps.phy_num_uars - 1) + 1;
 
 	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+		return -EINVAL;
+	}
 	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
 
 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 645c18d809a5..e86dd8d06777 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,14 +32,14 @@
  */
 
 #include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 #include "hns_roce_user.h"
 
-#define DB_REG_OFFSET			0x1000
-#define SQP_NUM				12
+#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
@@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
 				     int align, unsigned long *base)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	int ret = 0;
-	unsigned long qpn;
-
-	ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
-	if (ret == -1)
-		return -ENOMEM;
-
-	*base = qpn;
 
-	return 0;
+	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
 }
 
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 
-	if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+	if (base_qpn < SQP_NUM)
 		return;
 
 	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 				       struct ib_qp_cap *cap,
-				       enum ib_qp_type type,
 				       struct hns_roce_qp *hr_qp)
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	u32 max_cnt;
-	(void)type;
 
 	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
 	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 
 	/* Set SQ size */
 	ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
-					  init_attr->qp_type, hr_qp);
+					  hr_qp);
 	if (ret) {
 		dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
 		goto err_out;
@@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 			return ERR_PTR(-ENOMEM);
 
 		hr_qp = &hr_sqp->hr_qp;
+		hr_qp->port = init_attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+				     hr_dev->iboe.phy_port[hr_qp->port];
 
 		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
-						hr_dev->caps.sqp_start +
-						hr_dev->caps.num_ports +
-						init_attr->port_num - 1,
-						hr_qp);
+						hr_qp->ibqp.qp_num, hr_qp);
 		if (ret) {
 			dev_err(dev, "Create GSI QP failed!\n");
 			kfree(hr_sqp);
 			return ERR_PTR(ret);
 		}
 
-		hr_qp->port = (init_attr->port_num - 1);
-		hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
-				     hr_dev->caps.num_ports +
-				     init_attr->port_num - 1;
 		break;
 	}
 	default:{
@@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct device *dev = &hr_dev->pdev->dev;
 	int ret = -EINVAL;
 	int p;
+	enum ib_mtu active_mtu;
 
 	mutex_lock(&hr_qp->mutex);
 
@@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 	}
 
+	if (attr_mask & IB_QP_PATH_MTU) {
+		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+		if (attr->path_mtu > IB_MTU_2048 ||
+		    attr->path_mtu < IB_MTU_256 ||
+		    attr->path_mtu > active_mtu) {
+			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+				attr->path_mtu);
+			goto out;
+		}
+	}
+
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
 		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
-			n, hr_qp->rq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
 
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
-			n, hr_qp->sq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
 
@@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	/* A port include two SQP, six port total 12 */
 	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
-				   hr_dev->caps.num_qps - 1,
-				   hr_dev->caps.sqp_start + SQP_NUM,
+				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
 		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
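
A note on the IB_QP_PATH_MTU check that "IB/hns: Validate mtu when modified
qp" adds to hns_roce_modify_qp() above: iboe_get_mtu() from rdma/ib_addr.h
maps an Ethernet MTU to the largest IB MTU enum value that fits inside it, so
the requested path MTU can be compared directly against what the bound netdev
carries. Pulled out on its own, the test amounts to this (helper name
hypothetical):

	#include <linux/types.h>
	#include <rdma/ib_addr.h>	/* iboe_get_mtu() */
	#include <rdma/ib_verbs.h>	/* enum ib_mtu */

	/* True if the requested IB path MTU fits the netdev's active MTU. */
	static bool path_mtu_ok(enum ib_mtu requested, int netdev_mtu)
	{
		enum ib_mtu active = iboe_get_mtu(netdev_mtu);

		/* the patch allows 256..2048 bytes, never more than the wire */
		return requested >= IB_MTU_256 && requested <= IB_MTU_2048 &&
		       requested <= active;
	}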