author    Linus Torvalds <torvalds@linux-foundation.org>    2021-07-01 14:54:03 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-07-01 14:54:03 -0700
commit    e04360a2ea01bf42aa639b65aad81f502e896c7f (patch)
tree      59a4f4999a0da1204744aee8888b7001f7cccabd /drivers/infiniband
parent    514798d36572fb8eba6ccff3de10c9615063a7f5 (diff)
parent    3d8287544223a3d2f37981c1f9ffd94d0b5e9ffc (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "This contains a replacement driver for Intel iWarp hardware. This new
  driver supports the old ethernet hardware and also newer chips that
  can do ROCE.

  Other than that, this contains the typical mix of patches:

   - Driver updates and cleanups for bnxt_re, cxgb4, mlx4, and mlx5

   - Many static checker driven code clean ups, including a wide
     refcount_t conversion

   - Several series for the hns driver, more HIP09 HW capabilities,
     migration to new HW register manipulators, and code cleanups

   - Minor fixes and improvements in srp, rtrs, and cm

   - Improvements throughout for sysfs related code to use
     DEVICE_ATTR_*, make the ib_port sysfs first-class, and overall use
     sysfs APIs properly

   - Intel's new irdma driver replacing i40iw

   - rxe general clean ups and Memory Window support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (211 commits)
  RDMA/core: Always release restrack object
  RDMA/mlx5: Don't access NULL-cleared mpi pointer
  RDMA/irdma: Fix potential overflow expression in irdma_prm_get_pbles
  RDMA/irdma: Check contents of user-space irdma_mem_reg_req object
  RDMA/rxe: Missing unlock on error in get_srq_wqe()
  RDMA/cma: Fix rdma_resolve_route() memory leak
  RDMA/core/sa_query: Remove unused argument
  RDMA/cma: Fix incorrect Packet Lifetime calculation
  RDMA/cma: Protect RMW with qp_mutex
  RDMA/cma: Remove unnecessary INIT->INIT transition
  RDMA/hns: Add window selection field of congestion control
  RDMA/hfi1: Remove use of kmap()
  RDMA/irdma: Remove use of kmap()
  RDMA/bnxt_re: Fix uninitialized struct bit field rsvd1
  IB/isert: Align target max I/O size to initiator size
  RDMA/hns: Fix incorrect vlan enable bit in QPC
  MAINTAINERS: Update Broadcom RDMA maintainers
  RDMA/irdma: Use the queried port attributes
  RDMA/rxe: Fix redundant skb_put_zero
  RDMA/rxe: Fix extra copy in prepare_ack_packet
  ...
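The "wide refcount_t conversion" noted above replaces plain atomic_t reference counts with the kernel's saturation-checked refcount_t API; the cm.c diff below already uses refcount_set()/refcount_inc() on cm_id_priv. A minimal sketch of the pattern on a hypothetical struct, not code from this series:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object; the series applies this pattern to existing structs. */
struct demo_obj {
	refcount_t refcount;			/* was: atomic_t refcount; */
};

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	refcount_set(&obj->refcount, 1);	/* was: atomic_set(..., 1); */
	return obj;
}

static void demo_obj_get(struct demo_obj *obj)
{
	/* Unlike atomic_inc(), this WARNs and saturates on overflow. */
	refcount_inc(&obj->refcount);
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))	/* was: atomic_dec_and_test() */
		kfree(obj);
}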
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/Kconfig | 2
-rw-r--r--  drivers/infiniband/core/cache.c | 40
-rw-r--r--  drivers/infiniband/core/cm.c | 863
-rw-r--r--  drivers/infiniband/core/cma.c | 48
-rw-r--r--  drivers/infiniband/core/core_priv.h | 15
-rw-r--r--  drivers/infiniband/core/counters.c | 4
-rw-r--r--  drivers/infiniband/core/device.c | 41
-rw-r--r--  drivers/infiniband/core/iwcm.c | 9
-rw-r--r--  drivers/infiniband/core/iwcm.h | 2
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 22
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 16
-rw-r--r--  drivers/infiniband/core/iwpm_util.h | 4
-rw-r--r--  drivers/infiniband/core/mad.c | 27
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 1
-rw-r--r--  drivers/infiniband/core/multicast.c | 20
-rw-r--r--  drivers/infiniband/core/netlink.c | 2
-rw-r--r--  drivers/infiniband/core/nldev.c | 10
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 5
-rw-r--r--  drivers/infiniband/core/rw.c | 8
-rw-r--r--  drivers/infiniband/core/sa_query.c | 10
-rw-r--r--  drivers/infiniband/core/security.c | 9
-rw-r--r--  drivers/infiniband/core/sysfs.c | 1110
-rw-r--r--  drivers/infiniband/core/ucma.c | 11
-rw-r--r--  drivers/infiniband/core/ud_header.c | 8
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 2
-rw-r--r--  drivers/infiniband/core/user_mad.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs.h | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 23
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 12
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 2
-rw-r--r--  drivers/infiniband/core/verbs.c | 23
-rw-r--r--  drivers/infiniband/hw/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.h | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 30
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 19
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 17
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 13
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 8
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 11
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa.h | 3
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 3
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 11
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 530
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/trace.c | 5
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 92
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c | 114
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h | 12
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 15
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 72
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 371
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 13
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 79
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1983
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 969
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 40
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 84
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c | 94
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 47
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 22
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig | 9
-rw-r--r--  drivers/infiniband/hw/i40iw/Makefile | 9
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 602
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 4419
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h | 462
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 5243
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h | 1746
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hmc.c | 821
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hmc.h | 241
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c | 851
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 2064
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_osdep.h | 195
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h | 129
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_pble.c | 611
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_pble.h | 131
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 1496
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.h | 188
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_register.h | 1030
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_status.h | 101
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h | 1358
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 1200
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 422
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 1518
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2652
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 179
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_vf.c | 85
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_vf.h | 62
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 759
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_virtchnl.h | 124
-rw-r--r--  drivers/infiniband/hw/irdma/Kconfig | 12
-rw-r--r--  drivers/infiniband/hw/irdma/Makefile | 27
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 4421
-rw-r--r--  drivers/infiniband/hw/irdma/cm.h | 417
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c | 5657
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h | 1155
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.c | 710
-rw-r--r--  drivers/infiniband/hw/irdma/hmc.h | 180
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 2725
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_hw.c | 216
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_hw.h | 160
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_if.c | 216
-rw-r--r--  drivers/infiniband/hw/irdma/icrdma_hw.c | 149
-rw-r--r--  drivers/infiniband/hw/irdma/icrdma_hw.h | 71
-rw-r--r--  drivers/infiniband/hw/irdma/irdma.h | 153
-rw-r--r--  drivers/infiniband/hw/irdma/main.c | 358
-rw-r--r--  drivers/infiniband/hw/irdma/main.h | 555
-rw-r--r--  drivers/infiniband/hw/irdma/osdep.h | 86
-rw-r--r--  drivers/infiniband/hw/irdma/pble.c | 520
-rw-r--r--  drivers/infiniband/hw/irdma/pble.h | 136
-rw-r--r--  drivers/infiniband/hw/irdma/protos.h | 116
-rw-r--r--  drivers/infiniband/hw/irdma/puda.c | 1744
-rw-r--r--  drivers/infiniband/hw/irdma/puda.h | 194
-rw-r--r--  drivers/infiniband/hw/irdma/status.h | 71
-rw-r--r--  drivers/infiniband/hw/irdma/trace.c | 112
-rw-r--r--  drivers/infiniband/hw/irdma/trace.h | 3
-rw-r--r--  drivers/infiniband/hw/irdma/trace_cm.h | 458
-rw-r--r--  drivers/infiniband/hw/irdma/type.h | 1541
-rw-r--r--  drivers/infiniband/hw/irdma/uda.c | 271
-rw-r--r--  drivers/infiniband/hw/irdma/uda.h | 89
-rw-r--r--  drivers/infiniband/hw/irdma/uda_d.h | 128
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c | 1684
-rw-r--r--  drivers/infiniband/hw/irdma/user.h | 437
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 2541
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 4544
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.h | 225
-rw-r--r--  drivers/infiniband/hw/irdma/ws.c | 406
-rw-r--r--  drivers/infiniband/hw/irdma/ws.h | 41
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 27
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 42
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 177
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 616
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 3
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 3
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/Makefile | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 36
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c | 32
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hw_counters.c | 7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hw_counters.h | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 38
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 130
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mw.c | 343
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 14
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_opcode.c | 11
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_opcode.h | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h | 19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c | 45
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.h | 8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 23
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.c | 21
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.h | 272
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 159
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 208
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_srq.c | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 101
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 53
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 48
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 18
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 3
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c | 5
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 254
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h | 6
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-pri.h | 27
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c | 12
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 1
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c | 199
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.h | 4
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c | 53
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.h | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 256
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 1
211 files changed, 37761 insertions, 34007 deletions
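A large share of the diffstat above (core/sysfs.c, hfi1/sysfs.c, qib/qib_sysfs.c, plus the cm.c counter rework in the diff below) comes from moving hand-rolled kobject attributes onto the standard device-attribute helpers, per the pull message. A minimal sketch of the DEVICE_ATTR_* pattern, with a hypothetical attribute name not taken from these drivers:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute: DEVICE_ATTR_RO() derives the attribute
 * name and the struct (dev_attr_hca_type) from the _show function,
 * replacing open-coded struct attribute + sysfs_ops boilerplate.
 */
static ssize_t hca_type_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%s\n", "demo-hca");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *demo_attrs[] = {
	&dev_attr_hca_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);	/* yields demo_groups for device registration */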
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 04a78d9f8fe3..33d3ce9c888e 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -82,7 +82,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
-source "drivers/infiniband/hw/i40iw/Kconfig"
+source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3b0991fedd81..c9e9fc81447e 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -240,7 +240,7 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
u32 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
+ dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
port_num, entry->attr.index, entry->attr.gid.raw);
write_lock_irq(&table->rwlock);
@@ -323,7 +323,7 @@ static void store_gid_entry(struct ib_gid_table *table,
{
entry->state = GID_TABLE_ENTRY_VALID;
- dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
+ dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
__func__, entry->attr.port_num, entry->attr.index,
entry->attr.gid.raw);
@@ -354,7 +354,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
int ret;
if (!attr->ndev) {
- dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
+ dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
__func__, attr->port_num, attr->index);
return -EINVAL;
}
@@ -362,7 +362,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
ret = attr->device->ops.add_gid(attr, &entry->context);
if (ret) {
dev_err(&attr->device->dev,
- "%s GID add failed port=%d index=%d\n",
+ "%s GID add failed port=%u index=%u\n",
__func__, attr->port_num, attr->index);
return ret;
}
@@ -805,7 +805,7 @@ static void release_gid_table(struct ib_device *device,
continue;
if (kref_read(&table->data_vec[i]->kref) > 1) {
dev_err(&device->dev,
- "GID entry ref leak for index %d ref=%d\n", i,
+ "GID entry ref leak for index %d ref=%u\n", i,
kref_read(&table->data_vec[i]->kref));
leak = true;
}
@@ -1069,19 +1069,14 @@ int ib_get_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_pkey);
-int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
u64 *sn_pfx)
{
unsigned long flags;
- if (!rdma_is_port_valid(device, port_num))
- return -EINVAL;
-
read_lock_irqsave(&device->cache_lock, flags);
*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
read_unlock_irqrestore(&device->cache_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
@@ -1465,10 +1460,12 @@ err:
}
static int
-ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
+ bool update_pkeys, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
- struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
+ struct ib_pkey_cache *pkey_cache = NULL;
+ struct ib_pkey_cache *old_pkey_cache = NULL;
int i;
int ret;
@@ -1485,14 +1482,16 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
goto err;
}
- if (!rdma_protocol_roce(device, port)) {
+ if (!rdma_protocol_roce(device, port) && update_gids) {
ret = config_non_roce_gid_cache(device, port,
tprops->gid_tbl_len);
if (ret)
goto err;
}
- if (tprops->pkey_tbl_len) {
+ update_pkeys &= !!tprops->pkey_tbl_len;
+
+ if (update_pkeys) {
pkey_cache = kmalloc(struct_size(pkey_cache, table,
tprops->pkey_tbl_len),
GFP_KERNEL);
@@ -1517,9 +1516,10 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
write_lock_irq(&device->cache_lock);
- old_pkey_cache = device->port_data[port].cache.pkey;
-
- device->port_data[port].cache.pkey = pkey_cache;
+ if (update_pkeys) {
+ old_pkey_cache = device->port_data[port].cache.pkey;
+ device->port_data[port].cache.pkey = pkey_cache;
+ }
device->port_data[port].cache.lmc = tprops->lmc;
device->port_data[port].cache.port_state = tprops->state;
@@ -1551,6 +1551,8 @@ static void ib_cache_event_task(struct work_struct *_work)
* the cache.
*/
ret = ib_cache_update(work->event.device, work->event.element.port_num,
+ work->event.event == IB_EVENT_GID_CHANGE,
+ work->event.event == IB_EVENT_PKEY_CHANGE,
work->enforce_security);
/* GID event is notified already for individual GID entries by
@@ -1624,7 +1626,7 @@ int ib_cache_setup_one(struct ib_device *device)
return err;
rdma_for_each_port (device, p) {
- err = ib_cache_update(device, p, true);
+ err = ib_cache_update(device, p, true, true, true);
if (err)
return err;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0ead0d223154..c903b74f46a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -25,6 +25,7 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
+#include <rdma/ib_sysfs.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"
@@ -121,8 +122,6 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
- /* Sync on cm change port state */
- spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@@ -150,66 +149,23 @@ enum {
CM_COUNTER_GROUPS
};
-static char const counter_group_names[CM_COUNTER_GROUPS]
- [sizeof("cm_rx_duplicates")] = {
- "cm_tx_msgs", "cm_tx_retries",
- "cm_rx_msgs", "cm_rx_duplicates"
-};
-
-struct cm_counter_group {
- struct kobject obj;
- atomic_long_t counter[CM_ATTR_COUNT];
-};
-
struct cm_counter_attribute {
- struct attribute attr;
- int index;
-};
-
-#define CM_COUNTER_ATTR(_name, _index) \
-struct cm_counter_attribute cm_##_name##_counter_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .index = _index \
-}
-
-static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
-static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
-static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
-static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
-static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
-static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
-static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
-static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
-static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
-static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
-static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
-
-static struct attribute *cm_counter_default_attrs[] = {
- &cm_req_counter_attr.attr,
- &cm_mra_counter_attr.attr,
- &cm_rej_counter_attr.attr,
- &cm_rep_counter_attr.attr,
- &cm_rtu_counter_attr.attr,
- &cm_dreq_counter_attr.attr,
- &cm_drep_counter_attr.attr,
- &cm_sidr_req_counter_attr.attr,
- &cm_sidr_rep_counter_attr.attr,
- &cm_lap_counter_attr.attr,
- &cm_apr_counter_attr.attr,
- NULL
+ struct ib_port_attribute attr;
+ unsigned short group;
+ unsigned short index;
};
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
u32 port_num;
- struct list_head cm_priv_prim_list;
- struct list_head cm_priv_altr_list;
- struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};
struct cm_device {
+ struct kref kref;
struct list_head list;
+ spinlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -218,7 +174,6 @@ struct cm_device {
struct cm_av {
struct cm_port *port;
- union ib_gid dgid;
struct rdma_ah_attr ah_attr;
u16 pkey_index;
u8 timeout;
@@ -251,6 +206,7 @@ struct cm_id_private {
struct rb_node service_node;
struct rb_node sidr_id_node;
+ u32 sidr_slid;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
refcount_t refcount;
@@ -285,18 +241,28 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
- struct list_head prim_list;
- struct list_head altr_list;
- /* Indicates that the send port mad is registered and av is set */
- int prim_send_port_not_ready;
- int altr_send_port_not_ready;
-
struct list_head work_list;
atomic_t work_count;
struct rdma_ucm_ece ece;
};
+static void cm_dev_release(struct kref *kref)
+{
+ struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
+ u32 i;
+
+ rdma_for_each_port(cm_dev->ib_device, i)
+ kfree(cm_dev->port[i - 1]);
+
+ kfree(cm_dev);
+}
+
+static void cm_device_put(struct cm_device *cm_dev)
+{
+ kref_put(&cm_dev->kref, cm_dev_release);
+}
+
static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
@@ -305,52 +271,37 @@ static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
complete(&cm_id_priv->comp);
}
-static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
- struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
- struct cm_av *av;
- unsigned long flags, flags2;
- int ret = 0;
- /* don't let the port to be released till the agent is down */
- spin_lock_irqsave(&cm.state_lock, flags2);
- spin_lock_irqsave(&cm.lock, flags);
- if (!cm_id_priv->prim_send_port_not_ready)
- av = &cm_id_priv->av;
- else if (!cm_id_priv->altr_send_port_not_ready &&
- (cm_id_priv->alt_av.port))
- av = &cm_id_priv->alt_av;
- else {
- pr_info("%s: not valid CM id\n", __func__);
- ret = -ENODEV;
- spin_unlock_irqrestore(&cm.lock, flags);
- goto out;
- }
- spin_unlock_irqrestore(&cm.lock, flags);
- /* Make sure the port haven't released the mad yet */
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ if (!cm_id_priv->av.port)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
- pr_info("%s: not a valid MAD agent\n", __func__);
- ret = -ENODEV;
+ m = ERR_PTR(-EINVAL);
goto out;
}
- ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
+
+ ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
if (IS_ERR(ah)) {
- ret = PTR_ERR(ah);
+ m = ERR_CAST(ah);
goto out;
}
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
- av->pkey_index,
+ cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
rdma_destroy_ah(ah, 0);
- ret = PTR_ERR(m);
goto out;
}
@@ -360,11 +311,49 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
refcount_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
- *msg = m;
out:
- spin_unlock_irqrestore(&cm.state_lock, flags2);
- return ret;
+ spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ return m;
+}
+
+static void cm_free_msg(struct ib_mad_send_buf *msg)
+{
+ struct cm_id_private *cm_id_priv = msg->context[0];
+
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ cm_deref_id(cm_id_priv);
+ ib_free_send_mad(msg);
+}
+
+static struct ib_mad_send_buf *
+cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
+{
+ struct ib_mad_send_buf *msg;
+
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return msg;
+ cm_id_priv->msg = msg;
+ return msg;
+}
+
+static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
+{
+ struct cm_id_private *cm_id_priv = msg->context[0];
+
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ if (!WARN_ON(cm_id_priv->msg != msg))
+ cm_id_priv->msg = NULL;
+
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ cm_deref_id(cm_id_priv);
+ ib_free_send_mad(msg);
}
static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
@@ -391,15 +380,6 @@ static int cm_create_response_msg_ah(struct cm_port *port,
return 0;
}
-static void cm_free_msg(struct ib_mad_send_buf *msg)
-{
- if (msg->ah)
- rdma_destroy_ah(msg->ah, 0);
- if (msg->context[0])
- cm_deref_id(msg->context[0]);
- ib_free_send_mad(msg);
-}
-
static int cm_alloc_response_msg(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc,
struct ib_mad_send_buf **msg)
@@ -413,7 +393,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
if (ret) {
- cm_free_msg(m);
+ ib_free_send_mad(m);
return ret;
}
@@ -421,6 +401,13 @@ static int cm_alloc_response_msg(struct cm_port *port,
return 0;
}
+static void cm_free_response_msg(struct ib_mad_send_buf *msg)
+{
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ ib_free_send_mad(msg);
+}
+
static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
void *data;
@@ -445,57 +432,38 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
{
- struct rdma_ah_attr new_ah_attr;
- int ret;
+ struct cm_port *old_port = av->port;
- av->port = port;
- av->pkey_index = wc->pkey_index;
+ if (old_port == port)
+ return;
- /*
- * av->ah_attr might be initialized based on past wc during incoming
- * connect request or while sending out connect request. So initialize
- * a new ah_attr on stack. If initialization fails, old ah_attr is
- * used for sending any responses. If initialization is successful,
- * than new ah_attr is used by overwriting old one.
- */
- ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
- port->port_num, wc,
- grh, &new_ah_attr);
- if (ret)
- return ret;
+ av->port = port;
+ if (old_port)
+ cm_device_put(old_port->cm_dev);
+ if (port)
+ kref_get(&port->cm_dev->kref);
+}
- rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
- return 0;
+static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
+ struct rdma_ah_attr *ah_attr, struct cm_av *av)
+{
+ cm_set_av_port(av, port);
+ av->pkey_index = wc->pkey_index;
+ rdma_move_ah_attr(&av->ah_attr, ah_attr);
}
static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
struct ib_grh *grh, struct cm_av *av)
{
- av->port = port;
+ cm_set_av_port(av, port);
av->pkey_index = wc->pkey_index;
return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
port->port_num, wc,
grh, &av->ah_attr);
}
-static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
- struct cm_av *av, struct cm_port *port)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cm.lock, flags);
- if (&cm_id_priv->av == av)
- list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
- else if (&cm_id_priv->alt_av == av)
- list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
- else
- WARN_ON(true);
- spin_unlock_irqrestore(&cm.lock, flags);
-}
-
static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
@@ -539,8 +507,7 @@ get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
static int cm_init_av_by_path(struct sa_path_rec *path,
const struct ib_gid_attr *sgid_attr,
- struct cm_av *av,
- struct cm_id_private *cm_id_priv)
+ struct cm_av *av)
{
struct rdma_ah_attr new_ah_attr;
struct cm_device *cm_dev;
@@ -557,7 +524,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
if (ret)
return ret;
- av->port = port;
+ cm_set_av_port(av, port);
/*
* av->ah_attr might be initialized based on wc or during
@@ -574,11 +541,26 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return ret;
av->timeout = path->packet_life_time + 1;
- add_cm_id_to_port_list(cm_id_priv, av, port);
rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
+/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
+static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
+{
+ cm_set_av_port(dest, src->port);
+ cm_set_av_port(src, NULL);
+ dest->pkey_index = src->pkey_index;
+ rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
+ dest->timeout = src->timeout;
+}
+
+static void cm_destroy_av(struct cm_av *av)
+{
+ rdma_destroy_ah_attr(&av->ah_attr);
+ cm_set_av_port(av, NULL);
+}
+
static u32 cm_local_id(__be32 local_id)
{
return (__force u32) (local_id ^ cm.random_id_operand);
@@ -803,7 +785,6 @@ cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
struct rb_node **link = &cm.remote_sidr_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
- union ib_gid *port_gid = &cm_id_priv->av.dgid;
__be32 remote_id = cm_id_priv->id.remote_id;
while (*link) {
@@ -815,12 +796,9 @@ cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
link = &(*link)->rb_right;
else {
- int cmp;
- cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
- sizeof *port_gid);
- if (cmp < 0)
+ if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
link = &(*link)->rb_left;
- else if (cmp > 0)
+ else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
link = &(*link)->rb_right;
else
return cur_cm_id_priv;
@@ -854,8 +832,6 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
- INIT_LIST_HEAD(&cm_id_priv->prim_list);
- INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
refcount_set(&cm_id_priv->refcount, 1);
@@ -1082,7 +1058,7 @@ retest:
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_SIDR_REQ_RCVD:
cm_send_sidr_rep_locked(cm_id_priv,
@@ -1093,7 +1069,7 @@ retest:
break;
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
&cm_id_priv->id.device->node_guid,
sizeof(cm_id_priv->id.device->node_guid),
@@ -1111,7 +1087,7 @@ retest:
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
0, NULL, 0);
goto retest;
@@ -1129,7 +1105,7 @@ retest:
cm_send_dreq_locked(cm_id_priv, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
goto retest;
case IB_CM_DREQ_RCVD:
@@ -1156,12 +1132,7 @@ retest:
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
- if (!list_empty(&cm_id_priv->altr_list) &&
- (!cm_id_priv->altr_send_port_not_ready))
- list_del(&cm_id_priv->altr_list);
- if (!list_empty(&cm_id_priv->prim_list) &&
- (!cm_id_priv->prim_send_port_not_ready))
- list_del(&cm_id_priv->prim_list);
+
WARN_ON(cm_id_priv->listen_sharecount);
WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
@@ -1175,8 +1146,8 @@ retest:
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
- rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
- rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
+ cm_destroy_av(&cm_id_priv->av);
+ cm_destroy_av(&cm_id_priv->alt_av);
kfree(cm_id_priv->private_data);
kfree_rcu(cm_id_priv, rcu);
}
@@ -1308,10 +1279,18 @@ EXPORT_SYMBOL(ib_cm_insert_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
- u64 hi_tid, low_tid;
+ u64 hi_tid = 0, low_tid;
- hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- low_tid = (u64)cm_id_priv->id.local_id;
+ lockdep_assert_held(&cm_id_priv->lock);
+
+ low_tid = (u64)cm_id_priv->id.local_id;
+ if (!cm_id_priv->av.port)
+ return cpu_to_be64(low_tid);
+
+ spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ if (cm_id_priv->av.port->mad_agent)
+ hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
+ spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1500,7 +1479,9 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
int ib_send_cm_req(struct ib_cm_id *cm_id,
struct ib_cm_req_param *param)
{
+ struct cm_av av = {}, alt_av = {};
struct cm_id_private *cm_id_priv;
+ struct ib_mad_send_buf *msg;
struct cm_req_msg *req_msg;
unsigned long flags;
int ret;
@@ -1514,8 +1495,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1524,19 +1504,20 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
- goto out;
+ return ret;
}
ret = cm_init_av_by_path(param->primary_path,
- param->ppath_sgid_attr, &cm_id_priv->av,
- cm_id_priv);
+ param->ppath_sgid_attr, &av);
if (ret)
- goto out;
+ return ret;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path, NULL,
- &cm_id_priv->alt_av, cm_id_priv);
- if (ret)
- goto out;
+ &alt_av);
+ if (ret) {
+ cm_destroy_av(&av);
+ return ret;
+ }
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
@@ -1552,33 +1533,40 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
- ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
- if (ret)
- goto out;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ cm_move_av_from_path(&cm_id_priv->av, &av);
+ if (param->alternate_path)
+ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
+
+ msg = cm_alloc_priv_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ goto out_unlock;
+ }
- req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
+ req_msg = (struct cm_req_msg *)msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
- cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
- cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
+ msg->timeout_ms = cm_id_priv->timeout_ms;
+ msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
trace_icm_send_req(&cm_id_priv->id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- ret = ib_post_send_mad(cm_id_priv->msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- goto error2;
- }
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ goto out_free;
BUG_ON(cm_id->state != IB_CM_IDLE);
cm_id->state = IB_CM_REQ_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
-
-error2: cm_free_msg(cm_id_priv->msg);
-out: return ret;
+out_free:
+ cm_free_priv_msg(msg);
+out_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
@@ -1618,7 +1606,7 @@ static int cm_issue_rej(struct cm_port *port,
IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
- cm_free_msg(msg);
+ cm_free_response_msg(msg);
return ret;
}
@@ -1757,7 +1745,7 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
if (ret) {
- dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
+ dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
port_num, pkey_index, ret);
return 0;
}
@@ -1934,8 +1922,8 @@ static void cm_dup_req_handler(struct cm_work *work,
struct ib_mad_send_buf *msg = NULL;
int ret;
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REQ_COUNTER]);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
spin_lock_irq(&cm_id_priv->lock);
@@ -1974,7 +1962,7 @@ static void cm_dup_req_handler(struct cm_work *work,
return;
unlock: spin_unlock_irq(&cm_id_priv->lock);
-free: cm_free_msg(msg);
+free: cm_free_response_msg(msg);
}
static struct cm_id_private *cm_match_req(struct cm_work *work,
@@ -2163,8 +2151,10 @@ static int cm_req_handler(struct cm_work *work)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
- cm_id_priv);
+
+ /* This destroy call is needed to pair with cm_init_av_for_response */
+ cm_destroy_av(&cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
if (ret) {
int err;
@@ -2183,7 +2173,7 @@ static int cm_req_handler(struct cm_work *work)
}
if (cm_req_has_alt_path(req_msg)) {
ret = cm_init_av_by_path(&work->path[1], NULL,
- &cm_id_priv->alt_av, cm_id_priv);
+ &cm_id_priv->alt_av);
if (ret) {
ib_send_cm_rej(&cm_id_priv->id,
IB_CM_REJ_INVALID_ALT_GID,
@@ -2283,9 +2273,11 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
+ msg = cm_alloc_priv_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
goto out;
+ }
rep_msg = (struct cm_rep_msg *) msg->mad;
cm_format_rep(rep_msg, cm_id_priv, param);
@@ -2294,14 +2286,10 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
trace_icm_send_rep(cm_id);
ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
- }
+ if (ret)
+ goto out_free;
cm_id->state = IB_CM_REP_SENT;
- cm_id_priv->msg = msg;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
@@ -2309,8 +2297,13 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
"IBTA declares QPN to be 24 bits, but it is 0x%X\n",
param->qp_num);
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+out_free:
+ cm_free_priv_msg(msg);
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
@@ -2357,9 +2350,11 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
goto error;
}
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
goto error;
+ }
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
@@ -2426,8 +2421,8 @@ static void cm_dup_rep_handler(struct cm_work *work)
if (!cm_id_priv)
return;
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REP_COUNTER]);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
goto deref;
@@ -2453,7 +2448,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
goto deref;
unlock: spin_unlock_irq(&cm_id_priv->lock);
-free: cm_free_msg(msg);
+free: cm_free_response_msg(msg);
deref: cm_deref_id(cm_id_priv);
}
@@ -2553,7 +2548,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_queue_work_unlock(cm_id_priv, work);
return 0;
@@ -2577,7 +2572,7 @@ static int cm_establish_handler(struct cm_work *work)
goto out;
}
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
@@ -2604,13 +2599,13 @@ static int cm_rtu_handler(struct cm_work *work)
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_RTU_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_RTU_COUNTER]);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
@@ -2655,12 +2650,12 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret) {
+ msg = cm_alloc_priv_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
cm_enter_timewait(cm_id_priv);
- return ret;
+ return PTR_ERR(msg);
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
@@ -2672,12 +2667,11 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
- cm_free_msg(msg);
+ cm_free_priv_msg(msg);
return ret;
}
cm_id_priv->id.state = IB_CM_DREQ_SENT;
- cm_id_priv->msg = msg;
return 0;
}
@@ -2732,9 +2726,9 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
cm_set_private_data(cm_id_priv, private_data, private_data_len);
cm_enter_timewait(cm_id_priv);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- return ret;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
@@ -2794,7 +2788,7 @@ static int cm_issue_drep(struct cm_port *port,
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
- cm_free_msg(msg);
+ cm_free_response_msg(msg);
return ret;
}
@@ -2810,8 +2804,8 @@ static int cm_dreq_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
if (!cm_id_priv) {
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
trace_icm_no_priv_err(
IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
@@ -2830,18 +2824,18 @@ static int cm_dreq_handler(struct cm_work *work)
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
if (IS_ERR(msg))
goto unlock;
@@ -2853,11 +2847,11 @@ static int cm_dreq_handler(struct cm_work *work)
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
ib_post_send_mad(msg, NULL))
- cm_free_msg(msg);
+ cm_free_response_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_DREQ_COUNTER]);
goto unlock;
default:
trace_icm_dreq_unknown_err(&cm_id_priv->id);
@@ -2896,7 +2890,7 @@ static int cm_drep_handler(struct cm_work *work)
}
cm_enter_timewait(cm_id_priv);
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
@@ -2927,9 +2921,9 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_reset_to_idle(cm_id_priv);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- return ret;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
ari, ari_length, private_data, private_data_len,
state);
@@ -2937,9 +2931,9 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_enter_timewait(cm_id_priv);
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- return ret;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
ari, ari_length, private_data, private_data_len,
state);
@@ -3032,7 +3026,7 @@ static int cm_rej_handler(struct cm_work *work)
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
fallthrough;
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
@@ -3042,7 +3036,7 @@ static int cm_rej_handler(struct cm_work *work)
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_DREQ_SENT:
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
fallthrough;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
@@ -3052,8 +3046,7 @@ static int cm_rej_handler(struct cm_work *work)
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
- ib_cancel_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
break;
}
@@ -3117,13 +3110,15 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
default:
trace_icm_send_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
- goto error1;
+ goto error_unlock;
}
if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto error1;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ goto error_unlock;
+ }
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
msg_response, service_timeout,
@@ -3131,7 +3126,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
trace_icm_send_mra(cm_id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
- goto error2;
+ goto error_free_msg;
}
cm_id->state = cm_state;
@@ -3141,13 +3136,11 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
-error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
- return ret;
-
-error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
+error_free_msg:
cm_free_msg(msg);
+error_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ kfree(data);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
@@ -3192,16 +3185,14 @@ static int cm_mra_handler(struct cm_work *work)
case IB_CM_REQ_SENT:
if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
CM_MSG_RESPONSE_REQ ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout))
+ ib_modify_mad(cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
break;
case IB_CM_REP_SENT:
if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
CM_MSG_RESPONSE_REP ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout))
+ ib_modify_mad(cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
break;
@@ -3209,20 +3200,19 @@ static int cm_mra_handler(struct cm_work *work)
if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout)) {
+ ib_modify_mad(cm_id_priv->msg, timeout)) {
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
- atomic_long_inc(&work->port->
- counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
+ atomic_long_inc(
+ &work->port->counters[CM_RECV_DUPLICATES]
+ [CM_MRA_COUNTER]);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_MRA_COUNTER]);
fallthrough;
default:
trace_icm_mra_unknown_err(&cm_id_priv->id);
@@ -3291,6 +3281,8 @@ static int cm_lap_handler(struct cm_work *work)
struct cm_lap_msg *lap_msg;
struct ib_cm_lap_event_param *param;
struct ib_mad_send_buf *msg = NULL;
+ struct rdma_ah_attr ah_attr;
+ struct cm_av alt_av = {};
int ret;
/* Currently Alternate path messages are not supported for
@@ -3319,7 +3311,25 @@ static int cm_lap_handler(struct cm_work *work)
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
+ ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto deref;
+
+ ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
+ if (ret) {
+ rdma_destroy_ah_attr(&ah_attr);
+ return -EINVAL;
+ }
+
spin_lock_irq(&cm_id_priv->lock);
+ cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
+ &ah_attr, &cm_id_priv->av);
+ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
+
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
goto unlock;
@@ -3328,8 +3338,8 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_LAP_COUNTER]);
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
if (IS_ERR(msg))
goto unlock;
@@ -3343,27 +3353,16 @@ static int cm_lap_handler(struct cm_work *work)
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
ib_post_send_mad(msg, NULL))
- cm_free_msg(msg);
+ cm_free_response_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_LAP_COUNTER]);
goto unlock;
default:
goto unlock;
}
- ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
- if (ret)
- goto unlock;
-
- ret = cm_init_av_by_path(param->alternate_path, NULL,
- &cm_id_priv->alt_av, cm_id_priv);
- if (ret)
- goto unlock;
-
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
cm_queue_work_unlock(cm_id_priv, work);
@@ -3410,8 +3409,7 @@ static int cm_apr_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- cm_id_priv->msg = NULL;
+ ib_cancel_mad(cm_id_priv->msg);
cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
@@ -3471,6 +3469,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
+ struct cm_av av = {};
unsigned long flags;
int ret;
@@ -3479,42 +3478,43 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, param->sgid_attr,
- &cm_id_priv->av,
- cm_id_priv);
+ ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
if (ret)
- goto out;
+ return ret;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ cm_move_av_from_path(&cm_id_priv->av, &av);
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
- param);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
-
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state == IB_CM_IDLE) {
- trace_icm_send_sidr_req(&cm_id_priv->id);
- ret = ib_post_send_mad(msg, NULL);
- } else {
+ if (cm_id->state != IB_CM_IDLE) {
ret = -EINVAL;
+ goto out_unlock;
}
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- goto out;
+ msg = cm_alloc_priv_msg(cm_id_priv);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ goto out_unlock;
}
+
+ cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
+ param);
+ msg->timeout_ms = cm_id_priv->timeout_ms;
+ msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
+
+ trace_icm_send_sidr_req(&cm_id_priv->id);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ goto out_free;
cm_id->state = IB_CM_SIDR_REQ_SENT;
- cm_id_priv->msg = msg;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-out:
+ return 0;
+out_free:
+ cm_free_priv_msg(msg);
+out_unlock:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
@@ -3564,8 +3564,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->tid = sidr_req_msg->hdr.tid;
wc = work->mad_recv_wc->wc;
- cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
- cm_id_priv->av.dgid.global.interface_id = 0;
+ cm_id_priv->sidr_slid = wc->slid;
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
@@ -3576,8 +3575,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_SIDR_REQ_COUNTER]);
+ atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ [CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
@@ -3661,9 +3660,9 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
return -EINVAL;
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- return ret;
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
@@ -3737,7 +3736,7 @@ static int cm_sidr_rep_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ ib_cancel_mad(cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
cm_format_sidr_rep_event(work, cm_id_priv);
@@ -3748,22 +3747,26 @@ out:
return -EINVAL;
}
-static void cm_process_send_error(struct ib_mad_send_buf *msg,
+static void cm_process_send_error(struct cm_id_private *cm_id_priv,
+ struct ib_mad_send_buf *msg,
+ enum ib_cm_state state,
enum ib_wc_status wc_status)
{
- struct cm_id_private *cm_id_priv;
- struct ib_cm_event cm_event;
- enum ib_cm_state state;
+ struct ib_cm_event cm_event = {};
int ret;
- memset(&cm_event, 0, sizeof cm_event);
- cm_id_priv = msg->context[0];
-
/* Discard old sends or ones without a response. */
spin_lock_irq(&cm_id_priv->lock);
- state = (enum ib_cm_state) (unsigned long) msg->context[1];
- if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
- goto discard;
+ if (msg != cm_id_priv->msg) {
+ spin_unlock_irq(&cm_id_priv->lock);
+ cm_free_msg(msg);
+ return;
+ }
+ cm_free_priv_msg(msg);
+
+ if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
+ wc_status == IB_WC_WR_FLUSH_ERR)
+ goto out_unlock;
trace_icm_mad_send_err(state, wc_status);
switch (state) {
@@ -3786,26 +3789,27 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
cm_event.event = IB_CM_SIDR_REQ_ERROR;
break;
default:
- goto discard;
+ goto out_unlock;
}
spin_unlock_irq(&cm_id_priv->lock);
cm_event.param.send_status = wc_status;
/* No other events can occur on the cm_id at this point. */
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
- cm_free_msg(msg);
if (ret)
ib_destroy_cm_id(&cm_id_priv->id);
return;
-discard:
+out_unlock:
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
+ struct cm_id_private *cm_id_priv = msg->context[0];
+ enum ib_cm_state state =
+ (enum ib_cm_state)(unsigned long)msg->context[1];
struct cm_port *port;
u16 attr_index;
@@ -3818,28 +3822,19 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
* set to a cm_id), and is not a REJ, then it is a send that was
* manually retried.
*/
- if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
msg->retries = 1;
- atomic_long_add(1 + msg->retries,
- &port->counter_group[CM_XMIT].counter[attr_index]);
+ atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
if (msg->retries)
atomic_long_add(msg->retries,
- &port->counter_group[CM_XMIT_RETRIES].
- counter[attr_index]);
+ &port->counters[CM_XMIT_RETRIES][attr_index]);
- switch (mad_send_wc->status) {
- case IB_WC_SUCCESS:
- case IB_WC_WR_FLUSH_ERR:
- cm_free_msg(msg);
- break;
- default:
- if (msg->context[0] && msg->context[1])
- cm_process_send_error(msg, mad_send_wc->status);
- else
- cm_free_msg(msg);
- break;
- }
+ if (cm_id_priv)
+ cm_process_send_error(cm_id_priv, msg, state,
+ mad_send_wc->status);
+ else
+ cm_free_response_msg(msg);
}
static void cm_work_handler(struct work_struct *_work)
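The per-port statistics behind these hunks collapse the old kobject-based cm_counter_group into a flat two-dimensional atomic array indexed by [group][attribute]. A sketch of the assumed struct cm_port layout (CM_ATTR_COUNT is an assumed name for the attribute-count terminator; CM_COUNTER_GROUPS appears in the removed code below):

	struct cm_port {
		struct cm_device *cm_dev;
		struct ib_mad_agent *mad_agent;
		u32 port_num;
		atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
	};

With this shape a counter update is a single indexed operation, e.g. atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]) as in the handler above.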
@@ -3963,9 +3958,7 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct cm_av tmp_av;
unsigned long flags;
- int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3974,14 +3967,7 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
- /* Swap address vector */
- tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
- cm_id_priv->alt_av = tmp_av;
- /* Swap port send ready state */
- tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
- cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
- cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -4063,8 +4049,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
}
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
- atomic_long_inc(&port->counter_group[CM_RECV].
- counter[attr_id - CM_ATTR_ID_OFFSET]);
+ atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
if (!work) {
@@ -4116,7 +4101,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
qp_attr->pkey_index = cm_id_priv->av.pkey_index;
- qp_attr->port_num = cm_id_priv->av.port->port_num;
+ if (cm_id_priv->av.port)
+ qp_attr->port_num = cm_id_priv->av.port->port_num;
ret = 0;
break;
default:
@@ -4158,7 +4144,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
cm_id_priv->responder_resources;
qp_attr->min_rnr_timer = 0;
}
- if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
+ if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
+ cm_id_priv->alt_av.port) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
@@ -4219,7 +4206,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
}
} else {
*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
- qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ if (cm_id_priv->alt_av.port)
+ qp_attr->alt_port_num =
+ cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
@@ -4262,59 +4251,74 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
-static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
- char *buf)
+static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
- struct cm_counter_group *group;
- struct cm_counter_attribute *cm_attr;
+ struct cm_counter_attribute *cm_attr =
+ container_of(attr, struct cm_counter_attribute, attr);
+ struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);
- group = container_of(obj, struct cm_counter_group, obj);
- cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+ if (WARN_ON(!cm_dev))
+ return -EINVAL;
- return sysfs_emit(buf, "%ld\n",
- atomic_long_read(&group->counter[cm_attr->index]));
+ return sysfs_emit(
+ buf, "%ld\n",
+ atomic_long_read(
+ &cm_dev->port[port_num - 1]
+ ->counters[cm_attr->group][cm_attr->index]));
}
-static const struct sysfs_ops cm_counter_ops = {
- .show = cm_show_counter
-};
-
-static struct kobj_type cm_counter_obj_type = {
- .sysfs_ops = &cm_counter_ops,
- .default_attrs = cm_counter_default_attrs
-};
-
-static int cm_create_port_fs(struct cm_port *port)
-{
- int i, ret;
-
- for (i = 0; i < CM_COUNTER_GROUPS; i++) {
- ret = ib_port_register_module_stat(port->cm_dev->ib_device,
- port->port_num,
- &port->counter_group[i].obj,
- &cm_counter_obj_type,
- counter_group_names[i]);
- if (ret)
- goto error;
+#define CM_COUNTER_ATTR(_name, _group, _index) \
+ { \
+ .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \
+ .group = _group, .index = _index \
}
- return 0;
-
-error:
- while (i--)
- ib_port_unregister_module_stat(&port->counter_group[i].obj);
- return ret;
-
-}
-
-static void cm_remove_port_fs(struct cm_port *port)
-{
- int i;
-
- for (i = 0; i < CM_COUNTER_GROUPS; i++)
- ib_port_unregister_module_stat(&port->counter_group[i].obj);
+#define CM_COUNTER_GROUP(_group, _name) \
+ static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
+ CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \
+ CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \
+ CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \
+ CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \
+ CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \
+ CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \
+ CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \
+ CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \
+ CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \
+ CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \
+ CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \
+ }; \
+ static struct attribute *cm_counter_attrs_##_group[] = { \
+ &cm_counter_attr_##_group[0].attr.attr, \
+ &cm_counter_attr_##_group[1].attr.attr, \
+ &cm_counter_attr_##_group[2].attr.attr, \
+ &cm_counter_attr_##_group[3].attr.attr, \
+ &cm_counter_attr_##_group[4].attr.attr, \
+ &cm_counter_attr_##_group[5].attr.attr, \
+ &cm_counter_attr_##_group[6].attr.attr, \
+ &cm_counter_attr_##_group[7].attr.attr, \
+ &cm_counter_attr_##_group[8].attr.attr, \
+ &cm_counter_attr_##_group[9].attr.attr, \
+ &cm_counter_attr_##_group[10].attr.attr, \
+ NULL, \
+ }; \
+ static const struct attribute_group cm_counter_group_##_group = { \
+ .name = _name, \
+ .attrs = cm_counter_attrs_##_group, \
+ };
-}
+CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
+CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
+CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
+CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")
+
+static const struct attribute_group *cm_counter_groups[] = {
+ &cm_counter_group_CM_XMIT,
+ &cm_counter_group_CM_XMIT_RETRIES,
+ &cm_counter_group_CM_RECV,
+ &cm_counter_group_CM_RECV_DUPLICATES,
+ NULL,
+};
static int cm_add_one(struct ib_device *ib_device)
{
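Each attribute_group in the NULL-terminated list above becomes one per-port sysfs directory named after its .name, holding one read-only file per cm_counter_attribute. Under the conventional IB sysfs layout (path assumed here, not shown in the diff) the result looks like:

	/sys/class/infiniband/<dev>/ports/<N>/cm_tx_msgs/req
	/sys/class/infiniband/<dev>/ports/<N>/cm_tx_retries/rep
	/sys/class/infiniband/<dev>/ports/<N>/cm_rx_msgs/dreq
	/sys/class/infiniband/<dev>/ports/<N>/cm_rx_duplicates/sidr_req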
@@ -4337,10 +4341,14 @@ static int cm_add_one(struct ib_device *ib_device)
if (!cm_dev)
return -ENOMEM;
+ kref_init(&cm_dev->kref);
+ spin_lock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
+ ib_set_client_data(ib_device, &cm_client, cm_dev);
+
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
@@ -4356,10 +4364,8 @@ static int cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
- INIT_LIST_HEAD(&port->cm_priv_prim_list);
- INIT_LIST_HEAD(&port->cm_priv_altr_list);
-
- ret = cm_create_port_fs(port);
+ ret = ib_port_register_client_groups(ib_device, i,
+ cm_counter_groups);
if (ret)
goto error1;
@@ -4388,8 +4394,6 @@ static int cm_add_one(struct ib_device *ib_device)
goto free;
}
- ib_set_client_data(ib_device, &cm_client, cm_dev);
-
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
@@ -4398,11 +4402,10 @@ static int cm_add_one(struct ib_device *ib_device)
error3:
ib_unregister_mad_agent(port->mad_agent);
error2:
- cm_remove_port_fs(port);
+ ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
- kfree(port);
while (--i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
@@ -4410,11 +4413,11 @@ error1:
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
- cm_remove_port_fs(port);
- kfree(port);
+ ib_port_unregister_client_groups(ib_device, i,
+ cm_counter_groups);
}
free:
- kfree(cm_dev);
+ cm_device_put(cm_dev);
return ret;
}
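cm_device_put() replaces the direct kfree() because the cm_device is now reference counted: kref_init() above takes the initial reference and the last put frees the structure. A sketch of the assumed helpers (only kref_init() is visible in this hunk):

	static void cm_device_release(struct kref *kref)
	{
		struct cm_device *cm_dev =
			container_of(kref, struct cm_device, kref);

		kfree(cm_dev);
	}

	static void cm_device_put(struct cm_device *cm_dev)
	{
		kref_put(&cm_dev->kref, cm_device_release);
	}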
@@ -4422,8 +4425,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
struct cm_device *cm_dev = client_data;
struct cm_port *port;
- struct cm_id_private *cm_id_priv;
- struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@@ -4439,34 +4440,33 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
spin_unlock_irq(&cm.lock);
rdma_for_each_port (ib_device, i) {
+ struct ib_mad_agent *mad_agent;
+
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
+ mad_agent = port->mad_agent;
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
- /* Mark all the cm_id's as not valid */
- spin_lock_irq(&cm.lock);
- list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
- cm_id_priv->altr_send_port_not_ready = 1;
- list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
- cm_id_priv->prim_send_port_not_ready = 1;
- spin_unlock_irq(&cm.lock);
/*
 * We flush the queue here after going_down has been set; this
 * guarantees that no new work will be queued in the recv handler,
 * after which we can call the unregister_mad_agent.
 */
flush_workqueue(cm.wq);
- spin_lock_irq(&cm.state_lock);
- cur_mad_agent = port->mad_agent;
+ /*
+ * The above ensures no call paths from the work are running;
+ * the remaining paths all take the mad_agent_lock.
+ */
+ spin_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock_irq(&cm.state_lock);
- ib_unregister_mad_agent(cur_mad_agent);
- cm_remove_port_fs(port);
- kfree(port);
+ spin_unlock(&cm_dev->mad_agent_lock);
+ ib_unregister_mad_agent(mad_agent);
+ ib_port_unregister_client_groups(ib_device, i,
+ cm_counter_groups);
}
- kfree(cm_dev);
+ cm_device_put(cm_dev);
}
static int __init ib_cm_init(void)
@@ -4476,7 +4476,6 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
- spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab148a696c0c..515a7e95a421 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -926,25 +926,12 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
return ret;
}
-static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
-{
- struct ib_qp_attr qp_attr;
- int qp_attr_mask, ret;
-
- qp_attr.qp_state = IB_QPS_INIT;
- ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
- if (ret)
- return ret;
-
- return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
-}
-
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
struct rdma_id_private *id_priv;
struct ib_qp *qp;
- int ret;
+ int ret = 0;
id_priv = container_of(id, struct rdma_id_private, id);
if (id->device != pd->device) {
@@ -961,8 +948,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
- else
- ret = cma_init_conn_qp(id_priv, qp);
if (ret)
goto out_destroy;
@@ -1852,6 +1837,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
{
cma_cancel_operation(id_priv, state);
+ rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -1861,7 +1847,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
- rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -2472,8 +2457,10 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
if (IS_ERR(id))
return PTR_ERR(id);
+ mutex_lock(&id_priv->qp_mutex);
id->tos = id_priv->tos;
id->tos_set = id_priv->tos_set;
+ mutex_unlock(&id_priv->qp_mutex);
id->afonly = id_priv->afonly;
id_priv->cm_id.iw = id;
@@ -2534,8 +2521,10 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
+ mutex_lock(&id_priv->qp_mutex);
dev_id_priv->tos_set = id_priv->tos_set;
dev_id_priv->tos = id_priv->tos;
+ mutex_unlock(&id_priv->qp_mutex);
ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
@@ -2582,8 +2571,10 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
struct rdma_id_private *id_priv;
id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
id_priv->tos = (u8) tos;
id_priv->tos_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_set_service_type);
@@ -2610,8 +2601,10 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
id_priv->timeout = timeout;
id_priv->timeout_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
return 0;
}
@@ -2647,8 +2640,10 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
+ mutex_lock(&id_priv->qp_mutex);
id_priv->min_rnr_timer = min_rnr_timer;
id_priv->min_rnr_timer_set = true;
+ mutex_unlock(&id_priv->qp_mutex);
return 0;
}
@@ -2819,7 +2814,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
cma_init_resolve_route_work(work, id_priv);
- route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+ if (!route->path_rec)
+ route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
goto err1;
@@ -3034,8 +3030,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
- u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
+ u8 tos;
+ mutex_lock(&id_priv->qp_mutex);
+ tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
+ mutex_unlock(&id_priv->qp_mutex);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
@@ -3082,8 +3081,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
* PacketLifeTime = local ACK timeout/2
* as a reasonable approximation for RoCE networks.
*/
- route->path_rec->packet_life_time = id_priv->timeout_set ?
- id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+ mutex_lock(&id_priv->qp_mutex);
+ if (id_priv->timeout_set && id_priv->timeout)
+ route->path_rec->packet_life_time = id_priv->timeout - 1;
+ else
+ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ mutex_unlock(&id_priv->qp_mutex);
if (!route->path_rec->mtu) {
ret = -EINVAL;
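Worked example of the encoding, which per the IBTA convention expresses both values as 4.096 us * 2^n: a local ACK timeout of 18 means 4.096 us * 2^18, roughly 1.07 s, and packet_life_time = 17 encodes 4.096 us * 2^17, roughly 0.54 s, i.e. half the ACK timeout. The added id_priv->timeout test also keeps an unset value of zero from wrapping the subtraction (the field is presumably a u8, so 0 - 1 would otherwise encode the maximum lifetime).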
@@ -4107,8 +4110,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
+ mutex_lock(&id_priv->qp_mutex);
cm_id->tos = id_priv->tos;
cm_id->tos_set = id_priv->tos_set;
+ mutex_unlock(&id_priv->qp_mutex);
+
id_priv->cm_id.iw = cm_id;
memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 29809dd30041..647cca4e0240 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -78,8 +78,6 @@ static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
return net_generic(net, rdma_dev_net_id);
}
-int ib_device_register_sysfs(struct ib_device *device);
-void ib_device_unregister_sysfs(struct ib_device *device);
int ib_device_rename(struct ib_device *ibdev, const char *name);
int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
@@ -214,7 +212,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
-int ib_get_cached_subnet_prefix(struct ib_device *device,
+void ib_get_cached_subnet_prefix(struct ib_device *device,
u32 port_num,
u64 *sn_pfx);
@@ -378,13 +376,16 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
void ib_free_port_attrs(struct ib_core_device *coredev);
int ib_setup_port_attrs(struct ib_core_device *coredev);
+struct rdma_hw_stats *ib_get_hw_stats_port(struct ib_device *ibdev, u32 port_num);
+void ib_device_release_hw_stats(struct hw_stats_device_data *data);
+int ib_setup_device_attrs(struct ib_device *ibdev);
int rdma_compatdev_set(u8 enable);
-int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
- struct kobject *kobj, struct kobj_type *ktype,
- const char *name);
-void ib_port_unregister_module_stat(struct kobject *kobj);
+int ib_port_register_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups);
+void ib_port_unregister_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups);
int ib_device_set_netns_put(struct sk_buff *skb,
struct ib_device *dev, u32 ns_fd);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 15493357cfef..df9e6c5e4ddf 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -605,10 +605,10 @@ void rdma_counter_init(struct ib_device *dev)
port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
mutex_init(&port_counter->lock);
- if (!dev->ops.alloc_hw_stats)
+ if (!dev->ops.alloc_hw_port_stats)
continue;
- port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
+ port_counter->hstats = dev->ops.alloc_hw_port_stats(dev, port);
if (!port_counter->hstats)
goto fail;
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index c660cef66ac6..fa20b1824fb8 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -491,6 +491,8 @@ static void ib_device_release(struct device *device)
free_netdevs(dev);
WARN_ON(refcount_read(&dev->refcount));
+ if (dev->hw_stats_data)
+ ib_device_release_hw_stats(dev->hw_stats_data);
if (dev->port_data) {
ib_cache_release_one(dev);
ib_security_release_port_pkey_list(dev);
@@ -584,7 +586,6 @@ struct ib_device *_ib_alloc_device(size_t size)
return NULL;
}
- device->groups[0] = &ib_dev_attr_group;
rdma_init_coredev(&device->coredev, device, &init_net);
INIT_LIST_HEAD(&device->event_handler_list);
@@ -886,15 +887,8 @@ static void ib_policy_change_task(struct work_struct *work)
rdma_for_each_port (dev, i) {
u64 sp;
- int ret = ib_get_cached_subnet_prefix(dev,
- i,
- &sp);
-
- WARN_ONCE(ret,
- "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
- ret);
- if (!ret)
- ib_security_cache_change(dev, i, sp);
+ ib_get_cached_subnet_prefix(dev, i, &sp);
+ ib_security_cache_change(dev, i, sp);
}
}
up_read(&devices_rwsem);
@@ -1394,6 +1388,12 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
+ device->groups[0] = &ib_dev_attr_group;
+ device->groups[1] = device->ops.device_group;
+ ret = ib_setup_device_attrs(device);
+ if (ret)
+ goto cache_cleanup;
+
ib_device_register_rdmacg(device);
rdma_counter_init(device);
@@ -1407,7 +1407,7 @@ int ib_register_device(struct ib_device *device, const char *name,
if (ret)
goto cg_cleanup;
- ret = ib_device_register_sysfs(device);
+ ret = ib_setup_port_attrs(&device->coredev);
if (ret) {
dev_warn(&device->dev,
"Couldn't register device with driver model\n");
@@ -1449,6 +1449,7 @@ dev_cleanup:
cg_cleanup:
dev_set_uevent_suppress(&device->dev, false);
ib_device_unregister_rdmacg(device);
+cache_cleanup:
ib_cache_cleanup_one(device);
return ret;
}
@@ -1473,7 +1474,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
- ib_device_unregister_sysfs(ib_dev);
+ ib_free_port_attrs(&ib_dev->coredev);
device_del(&ib_dev->dev);
ib_device_unregister_rdmacg(ib_dev);
ib_cache_cleanup_one(ib_dev);
@@ -1691,13 +1692,11 @@ int ib_device_set_netns_put(struct sk_buff *skb,
}
/*
- * Currently supported only for those providers which support
- * disassociation and don't do port specific sysfs init. Once a
- * port_cleanup infrastructure is implemented, this limitation will be
- * removed.
+ * All the ib_clients, including uverbs, are reset when the namespace is
+ * changed and this cannot be blocked waiting for userspace to do
+ * something, so disassociation is mandatory.
*/
- if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
- ib_devices_shared_netns) {
+ if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
ret = -EOPNOTSUPP;
goto ns_err;
}
@@ -2595,7 +2594,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
- SET_DEVICE_OP(dev_ops, alloc_hw_stats);
+ SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
+ SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
SET_DEVICE_OP(dev_ops, alloc_mw);
@@ -2637,6 +2637,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
SET_DEVICE_OP(dev_ops, destroy_srq);
SET_DEVICE_OP(dev_ops, destroy_wq);
+ SET_DEVICE_OP(dev_ops, device_group);
SET_DEVICE_OP(dev_ops, detach_mcast);
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
@@ -2660,7 +2661,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
- SET_DEVICE_OP(dev_ops, init_port);
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
@@ -2683,6 +2683,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_wq);
SET_DEVICE_OP(dev_ops, peek_cq);
SET_DEVICE_OP(dev_ops, poll_cq);
+ SET_DEVICE_OP(dev_ops, port_groups);
SET_DEVICE_OP(dev_ops, post_recv);
SET_DEVICE_OP(dev_ops, post_send);
SET_DEVICE_OP(dev_ops, post_srq_recv);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index da8adadf4755..42261152b489 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -211,8 +211,7 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
- BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- if (atomic_dec_and_test(&cm_id_priv->refcount)) {
+ if (refcount_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
return 1;
@@ -225,7 +224,7 @@ static void add_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
@@ -257,7 +256,7 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
cm_id_priv->id.add_ref = add_ref;
cm_id_priv->id.rem_ref = rem_ref;
spin_lock_init(&cm_id_priv->lock);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
init_waitqueue_head(&cm_id_priv->connect_wait);
init_completion(&cm_id_priv->destroy_comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
@@ -1094,7 +1093,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
}
}
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
if (list_empty(&cm_id_priv->work_list)) {
list_add_tail(&work->list, &cm_id_priv->work_list);
queue_work(iwcm_wq, &work->work);
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 82c2cd1b0a80..bf74639be128 100644
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -52,7 +52,7 @@ struct iwcm_id_private {
wait_queue_head_t connect_wait;
struct list_head work_list;
spinlock_t lock;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head work_free_list;
};
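The switch from atomic_t to refcount_t is more than a rename: refcount_t saturates on overflow and WARNs on underflow and on increments from zero, which is why the iwpm_util.c hunks below split the first user (refcount_set) from subsequent users (refcount_inc). A minimal sketch of the lifecycle using the names from this file:

	/* First reference: refcount_inc() from 0 would WARN, so use set. */
	refcount_set(&cm_id_priv->refcount, 1);

	/* Each additional holder. */
	refcount_inc(&cm_id_priv->refcount);

	/* Last put frees, as iwcm_deref_id() above does. */
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		free_cm_id(cm_id_priv);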
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 932b26f50d03..12a9816fc0e2 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -123,7 +123,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
pid_query_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
dev_kfree_skb(skb);
if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref);
@@ -211,7 +211,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
add_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
add_mapping_error_nowarn:
dev_kfree_skb(skb);
if (nlmsg_request)
@@ -304,7 +304,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
ret = iwpm_wait_complete_req(nlmsg_request);
return ret;
query_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
query_mapping_error_nowarn:
dev_kfree_skb(skb);
if (nlmsg_request)
@@ -372,7 +372,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
"remove_mapping: Local sockaddr:");
return 0;
remove_mapping_error:
- pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ pr_info("%s: %s (client = %u)\n", __func__, err_str, nl_client);
if (skb)
dev_kfree_skb_any(skb);
return ret;
@@ -431,7 +431,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
strcmp(iwpm_ulib_name, iwpm_name) ||
iwpm_version < IWPM_UABI_VERSION_MIN) {
- pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",
+ pr_info("%s: Incorrect info (dev = %s name = %s version = %u)\n",
__func__, dev_name, iwpm_name, iwpm_version);
nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
goto register_pid_response_exit;
@@ -439,7 +439,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_user_pid = cb->nlh->nlmsg_pid;
iwpm_ulib_version = iwpm_version;
if (iwpm_ulib_version < IWPM_UABI_VERSION)
- pr_warn_once("%s: Down level iwpmd/pid %u. Continuing...",
+ pr_warn_once("%s: Down level iwpmd/pid %d. Continuing...",
__func__, iwpm_user_pid);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
@@ -650,7 +650,7 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %d\n",
+ pr_info("%s: Invalid port mapper client = %u\n",
__func__, nl_client);
return ret;
}
@@ -731,13 +731,13 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);
if (strcmp(iwpm_ulib_name, iwpm_name) ||
iwpm_version < IWPM_UABI_VERSION_MIN) {
- pr_info("%s: Invalid port mapper name = %s version = %d\n",
+ pr_info("%s: Invalid port mapper name = %s version = %u\n",
__func__, iwpm_name, iwpm_version);
return ret;
}
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %d\n",
+ pr_info("%s: Invalid port mapper client = %u\n",
__func__, nl_client);
return ret;
}
@@ -746,7 +746,7 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_user_pid = cb->nlh->nlmsg_pid;
if (iwpm_ulib_version < IWPM_UABI_VERSION)
- pr_warn_once("%s: Down level iwpmd/pid %u. Continuing...",
+ pr_warn_once("%s: Down level iwpmd/pid %d. Continuing...",
__func__, iwpm_user_pid);
if (!iwpm_mapinfo_available())
@@ -864,7 +864,7 @@ int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
abi_version = nla_get_u16(nltb[IWPM_NLA_HELLO_ABI_VERSION]);
nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid port mapper client = %d\n",
+ pr_info("%s: Invalid port mapper client = %u\n",
__func__, nl_client);
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index f80e5550b51f..3f8c019c7260 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -61,7 +61,7 @@ int iwpm_init(u8 nl_client)
{
int ret = 0;
mutex_lock(&iwpm_admin_lock);
- if (atomic_read(&iwpm_admin.refcount) == 0) {
+ if (!refcount_read(&iwpm_admin.refcount)) {
iwpm_hash_bucket = kcalloc(IWPM_MAPINFO_HASH_SIZE,
sizeof(struct hlist_head),
GFP_KERNEL);
@@ -77,8 +77,12 @@ int iwpm_init(u8 nl_client)
ret = -ENOMEM;
goto init_exit;
}
+
+ refcount_set(&iwpm_admin.refcount, 1);
+ } else {
+ refcount_inc(&iwpm_admin.refcount);
}
- atomic_inc(&iwpm_admin.refcount);
+
init_exit:
mutex_unlock(&iwpm_admin_lock);
if (!ret) {
@@ -105,12 +109,12 @@ int iwpm_exit(u8 nl_client)
if (!iwpm_valid_client(nl_client))
return -EINVAL;
mutex_lock(&iwpm_admin_lock);
- if (atomic_read(&iwpm_admin.refcount) == 0) {
+ if (!refcount_read(&iwpm_admin.refcount)) {
mutex_unlock(&iwpm_admin_lock);
pr_err("%s Incorrect usage - negative refcount\n", __func__);
return -EINVAL;
}
- if (atomic_dec_and_test(&iwpm_admin.refcount)) {
+ if (refcount_dec_and_test(&iwpm_admin.refcount)) {
free_hash_bucket();
free_reminfo_bucket();
pr_debug("%s: Resources are destroyed\n", __func__);
@@ -303,7 +307,7 @@ int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
int ret = -EINVAL;
if (!iwpm_valid_client(nl_client)) {
- pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+ pr_info("%s: Invalid client = %u\n", __func__, nl_client);
return ret;
}
spin_lock_irqsave(&iwpm_reminfo_lock, flags);
@@ -651,7 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
err_str = "Unable to send a nlmsg";
goto mapinfo_num_error;
}
- pr_debug("%s: Sent mapping number = %d\n", __func__, mapping_num);
+ pr_debug("%s: Sent mapping number = %u\n", __func__, mapping_num);
return 0;
mapinfo_num_error:
pr_info("%s: %s\n", __func__, err_str);
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index eeb8e6010907..e201835de733 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -90,7 +90,7 @@ struct iwpm_remote_info {
};
struct iwpm_admin_data {
- atomic_t refcount;
+ refcount_t refcount;
atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS];
u32 reg_list[RDMA_NL_NUM_CLIENTS];
@@ -183,7 +183,7 @@ u32 iwpm_check_registration(u8 nl_client, u32 reg);
void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_get_registration
+ * iwpm_get_registration - Get the client registration
* @nl_client: The index of the netlink client
*
* Returns the client registration type
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2081e4854fb0..1893aa613ad7 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -351,7 +351,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num);
if (!port_priv) {
- dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
+ dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
__func__, port_num);
ret = ERR_PTR(-ENODEV);
goto error1;
@@ -1626,7 +1626,7 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
/* Make sure MAD base version is understood */
if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
(!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
- pr_err("MAD received with unsupported base version %d %s\n",
+ pr_err("MAD received with unsupported base version %u %s\n",
mad_hdr->base_version, opa ? "(opa)" : "");
goto out;
}
@@ -2459,16 +2459,18 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
return NULL;
}
-int ib_modify_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf, u32 timeout_ms)
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
unsigned long flags;
int active;
- mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
- agent);
+ if (!send_buf)
+ return -EINVAL;
+
+ mad_agent_priv = container_of(send_buf->mad_agent,
+ struct ib_mad_agent_private, agent);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
@@ -2493,13 +2495,6 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_modify_mad);
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf)
-{
- ib_modify_mad(mad_agent, send_buf, 0);
-}
-EXPORT_SYMBOL(ib_cancel_mad);
-
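Callers such as ib_sa_cancel_query() below now invoke ib_cancel_mad() with only the send buffer, so the exported two-argument version is presumably replaced by a static inline in <rdma/ib_mad.h> (not shown in this diff) along the lines of:

	static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
	{
		ib_modify_mad(send_buf, 0);	/* timeout 0 == cancel */
	}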
static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
@@ -2872,7 +2867,7 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
/* It's worse than that! He's dead, Jim! */
dev_err(&qp_info->port_priv->device->dev,
- "Fatal error (%d) on MAD QP (%d)\n",
+ "Fatal error (%d) on MAD QP (%u)\n",
event->event, qp_info->qp->qp_num);
}
@@ -3130,9 +3125,9 @@ static void ib_mad_remove_device(struct ib_device *device, void *client_data)
if (ib_agent_port_close(device, i))
dev_err(&device->dev,
- "Couldn't close port %d for agents\n", i);
+ "Couldn't close port %u for agents\n", i);
if (ib_mad_port_close(device, i))
- dev_err(&device->dev, "Couldn't close port %d\n", i);
+ dev_err(&device->dev, "Couldn't close port %u\n", i);
}
}
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 4aa16b35dad0..1b7445a6f671 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -115,7 +115,6 @@ struct ib_mad_snoop_private {
struct ib_mad_qp_info *qp_info;
int snoop_index;
int mad_snoop_flags;
- atomic_t refcount;
struct completion comp;
};
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a5dd4b7a74bc..a236532a9026 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -61,7 +61,7 @@ struct mcast_port {
struct mcast_device *dev;
spinlock_t lock;
struct rb_root table;
- atomic_t refcount;
+ refcount_t refcount;
struct completion comp;
u32 port_num;
};
@@ -117,7 +117,7 @@ struct mcast_member {
struct mcast_group *group;
struct list_head list;
enum mcast_state state;
- atomic_t refcount;
+ refcount_t refcount;
struct completion comp;
};
@@ -178,7 +178,7 @@ static struct mcast_group *mcast_insert(struct mcast_port *port,
static void deref_port(struct mcast_port *port)
{
- if (atomic_dec_and_test(&port->refcount))
+ if (refcount_dec_and_test(&port->refcount))
complete(&port->comp);
}
@@ -199,7 +199,7 @@ static void release_group(struct mcast_group *group)
static void deref_member(struct mcast_member *member)
{
- if (atomic_dec_and_test(&member->refcount))
+ if (refcount_dec_and_test(&member->refcount))
complete(&member->comp);
}
@@ -401,7 +401,7 @@ static void process_group_error(struct mcast_group *group)
while (!list_empty(&group->active_list)) {
member = list_entry(group->active_list.next,
struct mcast_member, list);
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
list_del_init(&member->list);
adjust_membership(group, member->multicast.rec.join_state, -1);
member->state = MCAST_ERROR;
@@ -445,7 +445,7 @@ retest:
struct mcast_member, list);
multicast = &member->multicast;
join_state = multicast->rec.join_state;
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
if (join_state == (group->rec.join_state & join_state)) {
status = cmp_rec(&group->rec, &multicast->rec,
@@ -497,7 +497,7 @@ static void process_join_error(struct mcast_group *group, int status)
member = list_entry(group->pending_list.next,
struct mcast_member, list);
if (group->last_join == member) {
- atomic_inc(&member->refcount);
+ refcount_inc(&member->refcount);
list_del_init(&member->list);
spin_unlock_irq(&group->lock);
ret = member->multicast.callback(status, &member->multicast);
@@ -589,7 +589,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
kfree(group);
group = cur_group;
} else
- atomic_inc(&port->refcount);
+ refcount_inc(&port->refcount);
found:
atomic_inc(&group->refcount);
spin_unlock_irqrestore(&port->lock, flags);
@@ -632,7 +632,7 @@ ib_sa_join_multicast(struct ib_sa_client *client,
member->multicast.callback = callback;
member->multicast.context = context;
init_completion(&member->comp);
- atomic_set(&member->refcount, 1);
+ refcount_set(&member->refcount, 1);
member->state = MCAST_JOINING;
member->group = acquire_group(&dev->port[port_num - dev->start_port],
@@ -840,7 +840,7 @@ static int mcast_add_one(struct ib_device *device)
spin_lock_init(&port->lock);
port->table = RB_ROOT;
init_completion(&port->comp);
- atomic_set(&port->refcount, 1);
+ refcount_set(&port->refcount, 1);
++count;
}
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 8cd31ef25eff..1b2cc9e45ade 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -98,7 +98,7 @@ get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
*/
up_read(&rdma_nl_types[type].sem);
- request_module("rdma-netlink-subsys-%d", type);
+ request_module("rdma-netlink-subsys-%u", type);
down_read(&rdma_nl_types[type].sem);
cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 34d0cc1a4147..e9b4b2cccaa0 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -2060,13 +2060,14 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
if (!device)
return -EINVAL;
- if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
+ if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) {
ret = -EINVAL;
goto err;
}
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
- if (!rdma_is_port_valid(device, port)) {
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats) {
ret = -EINVAL;
goto err;
}
@@ -2088,11 +2089,6 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
goto err_msg;
}
- stats = device->port_data ? device->port_data[port].hw_stats : NULL;
- if (stats == NULL) {
- ret = -EINVAL;
- goto err_msg;
- }
mutex_lock(&stats->lock);
num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 7b638d91a4ec..68197e576433 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -186,12 +186,13 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
return res;
}
-/** is_ndev_for_default_gid_filter - Check if a given netdevice
+/**
+ * is_ndev_for_default_gid_filter - Check if a given netdevice
* can be considered for default GIDs or not.
* @ib_dev: IB device to check
* @port: Port to consider for adding default GID
* @rdma_ndev: rdma netdevice pointer
- * @cookie_ndev: Netdevice to consider to form a default GID
+ * @cookie: Netdevice to consider to form a default GID
*
* is_ndev_for_default_gid_filter() returns true if a given netdevice can be
* considered for deriving default RoCE GID, returns false otherwise.
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index a588c2038479..5221cce65675 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -389,7 +389,7 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
int count = 0, ret;
if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
- pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
+ pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
sg_cnt, prot_sg_cnt, pages_per_mr);
return -EINVAL;
}
@@ -429,7 +429,7 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
prot_sg_cnt, NULL, SZ_4K);
if (unlikely(ret)) {
- pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
+ pr_err("failed to map PI sg (%u)\n", sg_cnt + prot_sg_cnt);
goto out_destroy_sig_mr;
}
@@ -714,7 +714,7 @@ int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
IB_MR_TYPE_MEM_REG,
max_num_sg, 0);
if (ret) {
- pr_err("%s: failed to allocated %d MRs\n",
+ pr_err("%s: failed to allocated %u MRs\n",
__func__, nr_mrs);
return ret;
}
@@ -724,7 +724,7 @@ int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
if (ret) {
- pr_err("%s: failed to allocated %d SIG MRs\n",
+ pr_err("%s: failed to allocated %u SIG MRs\n",
__func__, nr_sig_mrs);
goto out_free_rdma_mrs;
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8f1705c403b4..b61576f702b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1172,7 +1172,6 @@ EXPORT_SYMBOL(ib_sa_unregister_client);
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
unsigned long flags;
- struct ib_mad_agent *agent;
struct ib_mad_send_buf *mad_buf;
xa_lock_irqsave(&queries, flags);
@@ -1180,7 +1179,6 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
xa_unlock_irqrestore(&queries, flags);
return;
}
- agent = query->port->agent;
mad_buf = query->mad_buf;
xa_unlock_irqrestore(&queries, flags);
@@ -1190,7 +1188,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
* sent to the MAD layer and has to be cancelled from there.
*/
if (!ib_nl_cancel_request(query))
- ib_cancel_mad(agent, mad_buf);
+ ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
@@ -1444,8 +1442,7 @@ enum opa_pr_supported {
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
- struct ib_device *device, u32 port_num,
- struct sa_path_rec *rec)
+ struct ib_device *device, u32 port_num)
{
struct ib_port_attr port_attr;
@@ -1567,8 +1564,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->sa_query.port = port;
if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
- status = opa_pr_query_possible(client, sa_dev, device, port_num,
- rec);
+ status = opa_pr_query_possible(client, sa_dev, device, port_num);
if (status == PR_NOT_SUPPORTED) {
ret = -EINVAL;
goto err1;
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index e5a78d1a63c9..3512c2e54efc 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -72,7 +72,7 @@ static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
if (ret)
return ret;
- ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
+ ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
return ret;
}
@@ -586,7 +586,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
!real_qp->qp_sec),
- "%s: QP security is not initialized for IB QP: %d\n",
+ "%s: QP security is not initialized for IB QP: %u\n",
__func__, real_qp->qp_num);
/* The port/pkey settings are maintained only for the real QP. Open
@@ -664,10 +664,7 @@ static int ib_security_pkey_access(struct ib_device *dev,
if (ret)
return ret;
- ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
-
- if (ret)
- return ret;
+ ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 05b702de00e8..6146c3c1cbe5 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -44,110 +44,174 @@
#include <rdma/ib_pma.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_counter.h>
+#include <rdma/ib_sysfs.h>
-struct ib_port;
+struct port_table_attribute {
+ struct ib_port_attribute attr;
+ char name[8];
+ int index;
+ __be16 attr_id;
+};
struct gid_attr_group {
- struct ib_port *port;
- struct kobject kobj;
- struct attribute_group ndev;
- struct attribute_group type;
+ struct ib_port *port;
+ struct kobject kobj;
+ struct attribute_group groups[2];
+ const struct attribute_group *groups_list[3];
+ struct port_table_attribute attrs_list[];
};
+
struct ib_port {
- struct kobject kobj;
- struct ib_device *ibdev;
+ struct kobject kobj;
+ struct ib_device *ibdev;
struct gid_attr_group *gid_attr_group;
- struct attribute_group gid_group;
- struct attribute_group *pkey_group;
- const struct attribute_group *pma_table;
- struct attribute_group *hw_stats_ag;
- struct rdma_hw_stats *hw_stats;
- u32 port_num;
+ struct hw_stats_port_data *hw_stats_data;
+
+ struct attribute_group groups[3];
+ const struct attribute_group *groups_list[5];
+ u32 port_num;
+ struct port_table_attribute attrs_list[];
};
-struct port_attribute {
- struct attribute attr;
- ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
- ssize_t (*store)(struct ib_port *, struct port_attribute *,
+struct hw_stats_device_attribute {
+ struct device_attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
const char *buf, size_t count);
};
-#define PORT_ATTR(_name, _mode, _show, _store) \
-struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)
-
-#define PORT_ATTR_RO(_name) \
-struct port_attribute port_attr_##_name = __ATTR_RO(_name)
+struct hw_stats_port_attribute {
+ struct ib_port_attribute attr;
+ ssize_t (*show)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num, char *buf);
+ ssize_t (*store)(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
+ const char *buf, size_t count);
+};
-struct port_table_attribute {
- struct port_attribute attr;
- char name[8];
- int index;
- __be16 attr_id;
+struct hw_stats_device_data {
+ struct attribute_group group;
+ struct rdma_hw_stats *stats;
+ struct hw_stats_device_attribute attrs[];
};
-struct hw_stats_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t count);
- int index;
- u32 port_num;
+struct hw_stats_port_data {
+ struct rdma_hw_stats *stats;
+ struct hw_stats_port_attribute attrs[];
};
static ssize_t port_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- struct port_attribute *port_attr =
- container_of(attr, struct port_attribute, attr);
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
struct ib_port *p = container_of(kobj, struct ib_port, kobj);
if (!port_attr->show)
return -EIO;
- return port_attr->show(p, port_attr, buf);
+ return port_attr->show(p->ibdev, p->port_num, port_attr, buf);
}
static ssize_t port_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t count)
{
- struct port_attribute *port_attr =
- container_of(attr, struct port_attribute, attr);
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
struct ib_port *p = container_of(kobj, struct ib_port, kobj);
if (!port_attr->store)
return -EIO;
- return port_attr->store(p, port_attr, buf, count);
+ return port_attr->store(p->ibdev, p->port_num, port_attr, buf, count);
+}
+
+struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj,
+ u32 *port_num)
+{
+ struct ib_port *port = container_of(kobj, struct ib_port, kobj);
+
+ *port_num = port->port_num;
+ return port->ibdev;
}
+EXPORT_SYMBOL(ib_port_sysfs_get_ibdev_kobj);
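The new export lets a client that holds only the port kobject recover the device and port number, complementing the ib_port_register_client_groups() API from core_priv.h above. A usage sketch with a hypothetical caller:

	static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
				    char *buf)
	{
		u32 port_num;
		struct ib_device *ibdev =
			ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);

		return sysfs_emit(buf, "%s port %u\n",
				  dev_name(&ibdev->dev), port_num);
	}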
static const struct sysfs_ops port_sysfs_ops = {
.show = port_attr_show,
.store = port_attr_store
};
+static ssize_t hw_stat_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hw_stats_device_attribute *stat_attr =
+ container_of(attr, struct hw_stats_device_attribute, attr);
+ struct ib_device *ibdev = container_of(dev, struct ib_device, dev);
+
+ return stat_attr->show(ibdev, ibdev->hw_stats_data->stats,
+ stat_attr - ibdev->hw_stats_data->attrs, 0, buf);
+}
+
+static ssize_t hw_stat_device_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_device_attribute *stat_attr =
+ container_of(attr, struct hw_stats_device_attribute, attr);
+ struct ib_device *ibdev = container_of(dev, struct ib_device, dev);
+
+ return stat_attr->store(ibdev, ibdev->hw_stats_data->stats,
+ stat_attr - ibdev->hw_stats_data->attrs, 0, buf,
+ count);
+}
+
+static ssize_t hw_stat_port_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hw_stats_port_attribute *stat_attr =
+ container_of(attr, struct hw_stats_port_attribute, attr);
+ struct ib_port *port = ibdev->port_data[port_num].sysfs;
+
+ return stat_attr->show(ibdev, port->hw_stats_data->stats,
+ stat_attr - port->hw_stats_data->attrs,
+ port->port_num, buf);
+}
+
+static ssize_t hw_stat_port_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_port_attribute *stat_attr =
+ container_of(attr, struct hw_stats_port_attribute, attr);
+ struct ib_port *port = ibdev->port_data[port_num].sysfs;
+
+ return stat_attr->store(ibdev, port->hw_stats_data->stats,
+ stat_attr - port->hw_stats_data->attrs,
+ port->port_num, buf, count);
+}
+
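Both show/store pairs above recover the attribute's index with pointer arithmetic against the flexible attrs[] array instead of keeping a per-attribute index field; subtracting two pointers of the same type yields an element count, not a byte offset. A standalone demonstration:

	#include <stdio.h>

	struct attr { const char *name; };
	struct table { struct attr attrs[4]; };

	int main(void)
	{
		struct table t = { { {"a"}, {"b"}, {"c"}, {"d"} } };
		struct attr *hit = &t.attrs[2];

		/* Same-type pointer subtraction counts elements. */
		printf("index = %td\n", hit - t.attrs);	/* prints 2 */
		return 0;
	}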
static ssize_t gid_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- struct port_attribute *port_attr =
- container_of(attr, struct port_attribute, attr);
+ struct ib_port_attribute *port_attr =
+ container_of(attr, struct ib_port_attribute, attr);
struct ib_port *p = container_of(kobj, struct gid_attr_group,
kobj)->port;
if (!port_attr->show)
return -EIO;
- return port_attr->show(p, port_attr, buf);
+ return port_attr->show(p->ibdev, p->port_num, port_attr, buf);
}
static const struct sysfs_ops gid_attr_sysfs_ops = {
.show = gid_attr_show
};
-static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t state_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
@@ -161,7 +225,7 @@ static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
[IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER"
};
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
@@ -172,81 +236,80 @@ static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
"UNKNOWN");
}
-static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t lid_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
return sysfs_emit(buf, "0x%x\n", attr.lid);
}
-static ssize_t lid_mask_count_show(struct ib_port *p,
- struct port_attribute *unused,
- char *buf)
+static ssize_t lid_mask_count_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sysfs_emit(buf, "%d\n", attr.lmc);
+ return sysfs_emit(buf, "%u\n", attr.lmc);
}
-static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t sm_lid_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
return sysfs_emit(buf, "0x%x\n", attr.sm_lid);
}
-static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t sm_sl_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sysfs_emit(buf, "%d\n", attr.sm_sl);
+ return sysfs_emit(buf, "%u\n", attr.sm_sl);
}
-static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t cap_mask_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
return sysfs_emit(buf, "0x%08x\n", attr.port_cap_flags);
}
-static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t rate_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
char *speed = "";
int rate; /* in deci-Gb/sec */
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
@@ -313,27 +376,27 @@ static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
return "<unknown>";
}
-static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t phys_state_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
struct ib_port_attr attr;
ssize_t ret;
- ret = ib_query_port(p->ibdev, p->port_num, &attr);
+ ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
- return sysfs_emit(buf, "%d: %s\n", attr.phys_state,
+ return sysfs_emit(buf, "%u: %s\n", attr.phys_state,
phys_state_to_str(attr.phys_state));
}
-static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
- char *buf)
+static ssize_t link_layer_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *unused, char *buf)
{
const char *output;
- switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+ switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
output = "InfiniBand";
break;
@@ -348,26 +411,26 @@ static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
return sysfs_emit(buf, "%s\n", output);
}
-static PORT_ATTR_RO(state);
-static PORT_ATTR_RO(lid);
-static PORT_ATTR_RO(lid_mask_count);
-static PORT_ATTR_RO(sm_lid);
-static PORT_ATTR_RO(sm_sl);
-static PORT_ATTR_RO(cap_mask);
-static PORT_ATTR_RO(rate);
-static PORT_ATTR_RO(phys_state);
-static PORT_ATTR_RO(link_layer);
+static IB_PORT_ATTR_RO(state);
+static IB_PORT_ATTR_RO(lid);
+static IB_PORT_ATTR_RO(lid_mask_count);
+static IB_PORT_ATTR_RO(sm_lid);
+static IB_PORT_ATTR_RO(sm_sl);
+static IB_PORT_ATTR_RO(cap_mask);
+static IB_PORT_ATTR_RO(rate);
+static IB_PORT_ATTR_RO(phys_state);
+static IB_PORT_ATTR_RO(link_layer);
static struct attribute *port_default_attrs[] = {
- &port_attr_state.attr,
- &port_attr_lid.attr,
- &port_attr_lid_mask_count.attr,
- &port_attr_sm_lid.attr,
- &port_attr_sm_sl.attr,
- &port_attr_cap_mask.attr,
- &port_attr_rate.attr,
- &port_attr_phys_state.attr,
- &port_attr_link_layer.attr,
+ &ib_port_attr_state.attr,
+ &ib_port_attr_lid.attr,
+ &ib_port_attr_lid_mask_count.attr,
+ &ib_port_attr_sm_lid.attr,
+ &ib_port_attr_sm_sl.attr,
+ &ib_port_attr_cap_mask.attr,
+ &ib_port_attr_rate.attr,
+ &ib_port_attr_phys_state.attr,
+ &ib_port_attr_link_layer.attr,
NULL
};
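
The IB_PORT_ATTR_RO() helpers and the new show signature are defined outside this hunk, in the rdma headers; a minimal sketch of the assumed shape:

	struct ib_port_attribute {
		struct attribute attr;
		ssize_t (*show)(struct ib_device *ibdev, u32 port_num,
				struct ib_port_attribute *attr, char *buf);
		ssize_t (*store)(struct ib_device *ibdev, u32 port_num,
				 struct ib_port_attribute *attr,
				 const char *buf, size_t count);
	};

	#define IB_PORT_ATTR_RO(_name) \
		struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name)

Since __ATTR_RO(_name) wires .show to _name##_show with mode 0444, each show function must carry exactly the attribute's name, which is what fixes the naming in this hunk.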
@@ -391,7 +454,8 @@ static ssize_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
}
static ssize_t _show_port_gid_attr(
- struct ib_port *p, struct port_attribute *attr, char *buf,
+ struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr,
+ char *buf,
ssize_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
{
struct port_table_attribute *tab_attr =
@@ -399,7 +463,7 @@ static ssize_t _show_port_gid_attr(
const struct ib_gid_attr *gid_attr;
ssize_t ret;
- gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index);
if (IS_ERR(gid_attr))
/* -EINVAL is returned for user space compatibility reasons. */
return -EINVAL;
@@ -409,15 +473,15 @@ static ssize_t _show_port_gid_attr(
return ret;
}
-static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+static ssize_t show_port_gid(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
const struct ib_gid_attr *gid_attr;
int len;
- gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ gid_attr = rdma_get_gid_attr(ibdev, port_num, tab_attr->index);
if (IS_ERR(gid_attr)) {
const union ib_gid zgid = {};
@@ -438,28 +502,30 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
return len;
}
-static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
- struct port_attribute *attr, char *buf)
+static ssize_t show_port_gid_attr_ndev(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ char *buf)
{
- return _show_port_gid_attr(p, attr, buf, print_ndev);
+ return _show_port_gid_attr(ibdev, port_num, attr, buf, print_ndev);
}
-static ssize_t show_port_gid_attr_gid_type(struct ib_port *p,
- struct port_attribute *attr,
+static ssize_t show_port_gid_attr_gid_type(struct ib_device *ibdev,
+ u32 port_num,
+ struct ib_port_attribute *attr,
char *buf)
{
- return _show_port_gid_attr(p, attr, buf, print_gid_type);
+ return _show_port_gid_attr(ibdev, port_num, attr, buf, print_gid_type);
}
-static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+static ssize_t show_port_pkey(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
u16 pkey;
int ret;
- ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+ ret = ib_query_pkey(ibdev, port_num, tab_attr->index, &pkey);
if (ret)
return ret;
@@ -528,8 +594,8 @@ out:
return ret;
}
-static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
- char *buf)
+static ssize_t show_pma_counter(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
@@ -539,14 +605,14 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
u8 data[8];
int len;
- ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
+ ret = get_perf_mad(ibdev, port_num, tab_attr->attr_id, &data,
40 + offset / 8, sizeof(data));
if (ret < 0)
return ret;
switch (width) {
case 4:
- len = sysfs_emit(buf, "%u\n",
+ len = sysfs_emit(buf, "%d\n",
(*data >> (4 - (offset % 8))) & 0xf);
break;
case 8:
@@ -683,54 +749,26 @@ static const struct attribute_group pma_group_noietf = {
static void ib_port_release(struct kobject *kobj)
{
- struct ib_port *p = container_of(kobj, struct ib_port, kobj);
- struct attribute *a;
+ struct ib_port *port = container_of(kobj, struct ib_port, kobj);
int i;
- if (p->gid_group.attrs) {
- for (i = 0; (a = p->gid_group.attrs[i]); ++i)
- kfree(a);
-
- kfree(p->gid_group.attrs);
- }
-
- if (p->pkey_group) {
- if (p->pkey_group->attrs) {
- for (i = 0; (a = p->pkey_group->attrs[i]); ++i)
- kfree(a);
-
- kfree(p->pkey_group->attrs);
- }
-
- kfree(p->pkey_group);
- p->pkey_group = NULL;
- }
-
- kfree(p);
+ for (i = 0; i != ARRAY_SIZE(port->groups); i++)
+ kfree(port->groups[i].attrs);
+ if (port->hw_stats_data)
+ kfree(port->hw_stats_data->stats);
+ kfree(port->hw_stats_data);
+ kfree(port);
}
static void ib_port_gid_attr_release(struct kobject *kobj)
{
- struct gid_attr_group *g = container_of(kobj, struct gid_attr_group,
- kobj);
- struct attribute *a;
+ struct gid_attr_group *gid_attr_group =
+ container_of(kobj, struct gid_attr_group, kobj);
int i;
- if (g->ndev.attrs) {
- for (i = 0; (a = g->ndev.attrs[i]); ++i)
- kfree(a);
-
- kfree(g->ndev.attrs);
- }
-
- if (g->type.attrs) {
- for (i = 0; (a = g->type.attrs[i]); ++i)
- kfree(a);
-
- kfree(g->type.attrs);
- }
-
- kfree(g);
+ for (i = 0; i != ARRAY_SIZE(gid_attr_group->groups); i++)
+ kfree(gid_attr_group->groups[i].attrs);
+ kfree(gid_attr_group);
}
static struct kobj_type port_type = {
@@ -744,49 +782,6 @@ static struct kobj_type gid_attr_type = {
.release = ib_port_gid_attr_release
};
-static struct attribute **
-alloc_group_attrs(ssize_t (*show)(struct ib_port *,
- struct port_attribute *, char *buf),
- int len)
-{
- struct attribute **tab_attr;
- struct port_table_attribute *element;
- int i;
-
- tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
- if (!tab_attr)
- return NULL;
-
- for (i = 0; i < len; i++) {
- element = kzalloc(sizeof(struct port_table_attribute),
- GFP_KERNEL);
- if (!element)
- goto err;
-
- if (snprintf(element->name, sizeof(element->name),
- "%d", i) >= sizeof(element->name)) {
- kfree(element);
- goto err;
- }
-
- element->attr.attr.name = element->name;
- element->attr.attr.mode = S_IRUGO;
- element->attr.show = show;
- element->index = i;
- sysfs_attr_init(&element->attr.attr);
-
- tab_attr[i] = &element->attr.attr;
- }
-
- return tab_attr;
-
-err:
- while (--i >= 0)
- kfree(tab_attr[i]);
- kfree(tab_attr);
- return NULL;
-}
-
/*
* Figure out which counter table to use depending on
* the device capabilities.
@@ -835,56 +830,30 @@ static int print_hw_stat(struct ib_device *dev, int port_num,
return sysfs_emit(buf, "%llu\n", stats->value[index] + v);
}
-static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t show_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, unsigned int index,
+ unsigned int port_num, char *buf)
{
- struct ib_device *dev;
- struct ib_port *port;
- struct hw_stats_attribute *hsa;
- struct rdma_hw_stats *stats;
int ret;
- hsa = container_of(attr, struct hw_stats_attribute, attr);
- if (!hsa->port_num) {
- dev = container_of((struct device *)kobj,
- struct ib_device, dev);
- stats = dev->hw_stats;
- } else {
- port = container_of(kobj, struct ib_port, kobj);
- dev = port->ibdev;
- stats = port->hw_stats;
- }
mutex_lock(&stats->lock);
- ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
+ ret = update_hw_stats(ibdev, stats, port_num, index);
if (ret)
goto unlock;
- ret = print_hw_stat(dev, hsa->port_num, stats, hsa->index, buf);
+ ret = print_hw_stat(ibdev, port_num, stats, index, buf);
unlock:
mutex_unlock(&stats->lock);
return ret;
}
-static ssize_t show_stats_lifespan(struct kobject *kobj,
- struct attribute *attr,
+static ssize_t show_stats_lifespan(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
char *buf)
{
- struct hw_stats_attribute *hsa;
- struct rdma_hw_stats *stats;
int msecs;
- hsa = container_of(attr, struct hw_stats_attribute, attr);
- if (!hsa->port_num) {
- struct ib_device *dev = container_of((struct device *)kobj,
- struct ib_device, dev);
-
- stats = dev->hw_stats;
- } else {
- struct ib_port *p = container_of(kobj, struct ib_port, kobj);
-
- stats = p->hw_stats;
- }
-
mutex_lock(&stats->lock);
msecs = jiffies_to_msecs(stats->lifespan);
mutex_unlock(&stats->lock);
@@ -892,12 +861,11 @@ static ssize_t show_stats_lifespan(struct kobject *kobj,
return sysfs_emit(buf, "%d\n", msecs);
}
-static ssize_t set_stats_lifespan(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_stats_lifespan(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ unsigned int index, unsigned int port_num,
+ const char *buf, size_t count)
{
- struct hw_stats_attribute *hsa;
- struct rdma_hw_stats *stats;
int msecs;
int jiffies;
int ret;
@@ -908,17 +876,6 @@ static ssize_t set_stats_lifespan(struct kobject *kobj,
if (msecs < 0 || msecs > 10000)
return -EINVAL;
jiffies = msecs_to_jiffies(msecs);
- hsa = container_of(attr, struct hw_stats_attribute, attr);
- if (!hsa->port_num) {
- struct ib_device *dev = container_of((struct device *)kobj,
- struct ib_device, dev);
-
- stats = dev->hw_stats;
- } else {
- struct ib_port *p = container_of(kobj, struct ib_port, kobj);
-
- stats = p->hw_stats;
- }
mutex_lock(&stats->lock);
stats->lifespan = jiffies;
@@ -927,65 +884,116 @@ static ssize_t set_stats_lifespan(struct kobject *kobj,
return count;
}
-static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
+static struct hw_stats_device_data *
+alloc_hw_stats_device(struct ib_device *ibdev)
{
- struct attribute **attr;
-
- sysfs_remove_group(kobj, attr_group);
+ struct hw_stats_device_data *data;
+ struct rdma_hw_stats *stats;
- for (attr = attr_group->attrs; *attr; attr++)
- kfree(*attr);
- kfree(attr_group);
-}
+ if (!ibdev->ops.alloc_hw_device_stats)
+ return ERR_PTR(-EOPNOTSUPP);
+ stats = ibdev->ops.alloc_hw_device_stats(ibdev);
+ if (!stats)
+ return ERR_PTR(-ENOMEM);
+ if (!stats->names || stats->num_counters <= 0)
+ goto err_free_stats;
-static struct attribute *alloc_hsa(int index, u32 port_num, const char *name)
-{
- struct hw_stats_attribute *hsa;
+ /*
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+ data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+ data->group.attrs = kcalloc(stats->num_counters + 2,
+ sizeof(*data->group.attrs), GFP_KERNEL);
+ if (!data->group.attrs)
+ goto err_free_data;
- hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
- if (!hsa)
- return NULL;
+ mutex_init(&stats->lock);
+ data->group.name = "hw_counters";
+ data->stats = stats;
+ return data;
- hsa->attr.name = (char *)name;
- hsa->attr.mode = S_IRUGO;
- hsa->show = show_hw_stats;
- hsa->store = NULL;
- hsa->index = index;
- hsa->port_num = port_num;
+err_free_data:
+ kfree(data);
+err_free_stats:
+ kfree(stats);
+ return ERR_PTR(-ENOMEM);
+}
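
alloc_hw_stats_device() packs the wrapper and its attribute array into a single allocation via struct_size(); a sketch of the assumed layout (the struct itself is defined earlier in sysfs.c, outside this diff):

	struct hw_stats_device_data {
		struct rdma_hw_stats *stats;
		struct attribute_group group;
		struct hw_stats_device_attribute attrs[]; /* counters + lifespan */
	};

	/* struct_size(data, attrs, n) == sizeof(*data) + n * sizeof(data->attrs[0]),
	 * computed with overflow checking, so the kzalloc() above reserves the
	 * header plus num_counters + 1 trailing attribute slots in one shot. */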
- return &hsa->attr;
+void ib_device_release_hw_stats(struct hw_stats_device_data *data)
+{
+ kfree(data->group.attrs);
+ kfree(data->stats);
+ kfree(data);
}
-static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num)
+int ib_setup_device_attrs(struct ib_device *ibdev)
{
- struct hw_stats_attribute *hsa;
+ struct hw_stats_device_attribute *attr;
+ struct hw_stats_device_data *data;
+ int i, ret;
- hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
- if (!hsa)
- return NULL;
+ data = alloc_hw_stats_device(ibdev);
+ if (IS_ERR(data)) {
+ if (PTR_ERR(data) == -EOPNOTSUPP)
+ return 0;
+ return PTR_ERR(data);
+ }
+ ibdev->hw_stats_data = data;
- hsa->attr.name = name;
- hsa->attr.mode = S_IWUSR | S_IRUGO;
- hsa->show = show_stats_lifespan;
- hsa->store = set_stats_lifespan;
- hsa->index = 0;
- hsa->port_num = port_num;
+ ret = ibdev->ops.get_hw_stats(ibdev, data->stats, 0,
+ data->stats->num_counters);
+ if (ret != data->stats->num_counters) {
+ if (WARN_ON(ret >= 0))
+ return -EINVAL;
+ return ret;
+ }
+
+ data->stats->timestamp = jiffies;
+
+ for (i = 0; i < data->stats->num_counters; i++) {
+ attr = &data->attrs[i];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = hw_stat_device_show;
+ attr->show = show_hw_stats;
+ data->group.attrs[i] = &attr->attr.attr;
+ }
- return &hsa->attr;
+ attr = &data->attrs[i];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = "lifespan";
+ attr->attr.attr.mode = 0644;
+ attr->attr.show = hw_stat_device_show;
+ attr->show = show_stats_lifespan;
+ attr->attr.store = hw_stat_device_store;
+ attr->store = set_stats_lifespan;
+ data->group.attrs[i] = &attr->attr.attr;
+ for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
+ if (!ibdev->groups[i]) {
+ ibdev->groups[i] = &data->group;
+ return 0;
+ }
+ WARN(true, "struct ib_device->groups is too small");
+ return -EINVAL;
}
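
hw_stat_device_show() and hw_stat_device_store(), referenced above, are trampolines defined elsewhere in this file; a plausible sketch of the show side, with the counter index recovered by pointer arithmetic on the trailing array (names assumed from the usage here):

	static ssize_t hw_stat_device_show(struct device *dev,
					   struct device_attribute *attr, char *buf)
	{
		struct hw_stats_device_attribute *stat_attr =
			container_of(attr, struct hw_stats_device_attribute, attr);
		struct ib_device *ibdev = container_of(dev, struct ib_device, dev);

		return stat_attr->show(ibdev, ibdev->hw_stats_data->stats,
				       stat_attr - ibdev->hw_stats_data->attrs, 0,
				       buf);
	}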
-static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
- u32 port_num)
+static struct hw_stats_port_data *
+alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
{
- struct attribute_group *hsag;
+ struct ib_device *ibdev = port->ibdev;
+ struct hw_stats_port_data *data;
struct rdma_hw_stats *stats;
- int i, ret;
-
- stats = device->ops.alloc_hw_stats(device, port_num);
+ if (!ibdev->ops.alloc_hw_port_stats)
+ return ERR_PTR(-EOPNOTSUPP);
+ stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num);
if (!stats)
- return;
-
+ return ERR_PTR(-ENOMEM);
if (!stats->names || stats->num_counters <= 0)
goto err_free_stats;
@@ -993,244 +1001,275 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 * Two extra attribute elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
- hsag = kzalloc(sizeof(*hsag) +
- sizeof(void *) * (stats->num_counters + 2),
+ data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
GFP_KERNEL);
- if (!hsag)
+ if (!data)
goto err_free_stats;
+ group->attrs = kcalloc(stats->num_counters + 2,
+ sizeof(*group->attrs), GFP_KERNEL);
+ if (!group->attrs)
+ goto err_free_data;
- ret = device->ops.get_hw_stats(device, stats, port_num,
- stats->num_counters);
- if (ret != stats->num_counters)
- goto err_free_hsag;
+ mutex_init(&stats->lock);
+ group->name = "hw_counters";
+ data->stats = stats;
+ return data;
- stats->timestamp = jiffies;
+err_free_data:
+ kfree(data);
+err_free_stats:
+ kfree(stats);
+ return ERR_PTR(-ENOMEM);
+}
- hsag->name = "hw_counters";
- hsag->attrs = (void *)hsag + sizeof(*hsag);
+static int setup_hw_port_stats(struct ib_port *port,
+ struct attribute_group *group)
+{
+ struct hw_stats_port_attribute *attr;
+ struct hw_stats_port_data *data;
+ int i, ret;
- for (i = 0; i < stats->num_counters; i++) {
- hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
- if (!hsag->attrs[i])
- goto err;
- sysfs_attr_init(hsag->attrs[i]);
+ data = alloc_hw_stats_port(port, group);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = port->ibdev->ops.get_hw_stats(port->ibdev, data->stats,
+ port->port_num,
+ data->stats->num_counters);
+ if (ret != data->stats->num_counters) {
+ if (WARN_ON(ret >= 0))
+ return -EINVAL;
+ return ret;
}
- mutex_init(&stats->lock);
- /* treat an error here as non-fatal */
- hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
- if (hsag->attrs[i])
- sysfs_attr_init(hsag->attrs[i]);
-
- if (port) {
- struct kobject *kobj = &port->kobj;
- ret = sysfs_create_group(kobj, hsag);
- if (ret)
- goto err;
- port->hw_stats_ag = hsag;
- port->hw_stats = stats;
- if (device->port_data)
- device->port_data[port_num].hw_stats = stats;
- } else {
- struct kobject *kobj = &device->dev.kobj;
- ret = sysfs_create_group(kobj, hsag);
- if (ret)
- goto err;
- device->hw_stats_ag = hsag;
- device->hw_stats = stats;
+ data->stats->timestamp = jiffies;
+
+ for (i = 0; i < data->stats->num_counters; i++) {
+ attr = &data->attrs[i];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = hw_stat_port_show;
+ attr->show = show_hw_stats;
+ group->attrs[i] = &attr->attr.attr;
}
- return;
+ attr = &data->attrs[i];
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = "lifespan";
+ attr->attr.attr.mode = 0644;
+ attr->attr.show = hw_stat_port_show;
+ attr->show = show_stats_lifespan;
+ attr->attr.store = hw_stat_port_store;
+ attr->store = set_stats_lifespan;
+ group->attrs[i] = &attr->attr.attr;
+
+ port->hw_stats_data = data;
+ return 0;
+}
-err:
- for (; i >= 0; i--)
- kfree(hsag->attrs[i]);
-err_free_hsag:
- kfree(hsag);
-err_free_stats:
- kfree(stats);
+struct rdma_hw_stats *ib_get_hw_stats_port(struct ib_device *ibdev,
+ u32 port_num)
+{
+ if (!ibdev->port_data || !rdma_is_port_valid(ibdev, port_num) ||
+ !ibdev->port_data[port_num].sysfs->hw_stats_data)
+ return NULL;
+ return ibdev->port_data[port_num].sysfs->hw_stats_data->stats;
}
-static int add_port(struct ib_core_device *coredev, int port_num)
+static int
+alloc_port_table_group(const char *name, struct attribute_group *group,
+ struct port_table_attribute *attrs, size_t num,
+ ssize_t (*show)(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *, char *buf))
{
- struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
- bool is_full_dev = &device->coredev == coredev;
- struct ib_port *p;
- struct ib_port_attr attr;
+ struct attribute **attr_list;
int i;
- int ret;
-
- ret = ib_query_port(device, port_num, &attr);
- if (ret)
- return ret;
- p = kzalloc(sizeof *p, GFP_KERNEL);
- if (!p)
+ attr_list = kcalloc(num + 1, sizeof(*attr_list), GFP_KERNEL);
+ if (!attr_list)
return -ENOMEM;
- p->ibdev = device;
- p->port_num = port_num;
-
- ret = kobject_init_and_add(&p->kobj, &port_type,
- coredev->ports_kobj,
- "%d", port_num);
- if (ret)
- goto err_put;
+ for (i = 0; i < num; i++) {
+ struct port_table_attribute *element = &attrs[i];
- p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
- if (!p->gid_attr_group) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- p->gid_attr_group->port = p;
- ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
- &p->kobj, "gid_attrs");
- if (ret)
- goto err_put_gid_attrs;
+ if (snprintf(element->name, sizeof(element->name), "%d", i) >=
+ sizeof(element->name))
+ goto err;
- if (device->ops.process_mad && is_full_dev) {
- p->pma_table = get_counter_table(device, port_num);
- ret = sysfs_create_group(&p->kobj, p->pma_table);
- if (ret)
- goto err_put_gid_attrs;
- }
+ sysfs_attr_init(&element->attr.attr);
+ element->attr.attr.name = element->name;
+ element->attr.attr.mode = 0444;
+ element->attr.show = show;
+ element->index = i;
- p->gid_group.name = "gids";
- p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
- if (!p->gid_group.attrs) {
- ret = -ENOMEM;
- goto err_remove_pma;
+ attr_list[i] = &element->attr.attr;
}
+ group->name = name;
+ group->attrs = attr_list;
+ return 0;
+err:
+ kfree(attr_list);
+ return -EINVAL;
+}
- ret = sysfs_create_group(&p->kobj, &p->gid_group);
- if (ret)
- goto err_free_gid;
+/*
+ * Create the sysfs:
+ * ibp0s9/ports/XX/gid_attrs/{ndevs,types}/YYY
+ * YYY is the gid table index in decimal
+ */
+static int setup_gid_attrs(struct ib_port *port,
+ const struct ib_port_attr *attr)
+{
+ struct gid_attr_group *gid_attr_group;
+ int ret;
- p->gid_attr_group->ndev.name = "ndevs";
- p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
- attr.gid_tbl_len);
- if (!p->gid_attr_group->ndev.attrs) {
- ret = -ENOMEM;
- goto err_remove_gid;
- }
+ gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+ attr->gid_tbl_len * 2),
+ GFP_KERNEL);
+ if (!gid_attr_group)
+ return -ENOMEM;
+ gid_attr_group->port = port;
+ kobject_init(&gid_attr_group->kobj, &gid_attr_type);
- ret = sysfs_create_group(&p->gid_attr_group->kobj,
- &p->gid_attr_group->ndev);
+ ret = alloc_port_table_group("ndevs", &gid_attr_group->groups[0],
+ gid_attr_group->attrs_list,
+ attr->gid_tbl_len,
+ show_port_gid_attr_ndev);
if (ret)
- goto err_free_gid_ndev;
+ goto err_put;
+ gid_attr_group->groups_list[0] = &gid_attr_group->groups[0];
- p->gid_attr_group->type.name = "types";
- p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
- attr.gid_tbl_len);
- if (!p->gid_attr_group->type.attrs) {
- ret = -ENOMEM;
- goto err_remove_gid_ndev;
- }
+ ret = alloc_port_table_group(
+ "types", &gid_attr_group->groups[1],
+ gid_attr_group->attrs_list + attr->gid_tbl_len,
+ attr->gid_tbl_len, show_port_gid_attr_gid_type);
+ if (ret)
+ goto err_put;
+ gid_attr_group->groups_list[1] = &gid_attr_group->groups[1];
- ret = sysfs_create_group(&p->gid_attr_group->kobj,
- &p->gid_attr_group->type);
+ ret = kobject_add(&gid_attr_group->kobj, &port->kobj, "gid_attrs");
+ if (ret)
+ goto err_put;
+ ret = sysfs_create_groups(&gid_attr_group->kobj,
+ gid_attr_group->groups_list);
if (ret)
- goto err_free_gid_type;
+ goto err_del;
+ port->gid_attr_group = gid_attr_group;
+ return 0;
- if (attr.pkey_tbl_len) {
- p->pkey_group = kzalloc(sizeof(*p->pkey_group), GFP_KERNEL);
- if (!p->pkey_group) {
- ret = -ENOMEM;
- goto err_remove_gid_type;
- }
+err_del:
+ kobject_del(&gid_attr_group->kobj);
+err_put:
+ kobject_put(&gid_attr_group->kobj);
+ return ret;
+}
- p->pkey_group->name = "pkeys";
- p->pkey_group->attrs = alloc_group_attrs(show_port_pkey,
- attr.pkey_tbl_len);
- if (!p->pkey_group->attrs) {
- ret = -ENOMEM;
- goto err_free_pkey_group;
- }
+static void destroy_gid_attrs(struct ib_port *port)
+{
+ struct gid_attr_group *gid_attr_group = port->gid_attr_group;
- ret = sysfs_create_group(&p->kobj, p->pkey_group);
- if (ret)
- goto err_free_pkey;
- }
+ if (!gid_attr_group)
+ return;
+ sysfs_remove_groups(&gid_attr_group->kobj, gid_attr_group->groups_list);
+ kobject_del(&gid_attr_group->kobj);
+ kobject_put(&gid_attr_group->kobj);
+}
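
setup_gid_attrs() splits kobject_init() from kobject_add() so that every failure path can funnel through kobject_put(), which is legal as soon as init has run; the lifecycle both functions lean on:

	kobject_init(&kobj, &ktype);	/* refcount = 1, release() armed */
	ret = kobject_add(&kobj, parent, "gid_attrs"); /* visible in sysfs */
	if (ret)
		kobject_put(&kobj);	/* safe even if add() never ran */
	/* ... normal operation ... */
	kobject_del(&kobj);		/* unlink from sysfs */
	kobject_put(&kobj);		/* last ref; the ktype release() frees */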
+
+/*
+ * Create the sysfs:
+ * ibp0s9/ports/XX/{gids,pkeys,counters}/YYY
+ */
+static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ const struct ib_port_attr *attr)
+{
+ struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
+ bool is_full_dev = &device->coredev == coredev;
+ const struct attribute_group **cur_group;
+ struct ib_port *p;
+ int ret;
+ p = kzalloc(struct_size(p, attrs_list,
+ attr->gid_tbl_len + attr->pkey_tbl_len),
+ GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ p->ibdev = device;
+ p->port_num = port_num;
+ kobject_init(&p->kobj, &port_type);
+
+ cur_group = p->groups_list;
+ ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list,
+ attr->gid_tbl_len, show_port_gid);
+ if (ret)
+ goto err_put;
+ *cur_group++ = &p->groups[0];
- if (device->ops.init_port && is_full_dev) {
- ret = device->ops.init_port(device, port_num, &p->kobj);
+ if (attr->pkey_tbl_len) {
+ ret = alloc_port_table_group("pkeys", &p->groups[1],
+ p->attrs_list + attr->gid_tbl_len,
+ attr->pkey_tbl_len, show_port_pkey);
if (ret)
- goto err_remove_pkey;
+ goto err_put;
+ *cur_group++ = &p->groups[1];
}
/*
* If port == 0, it means hw_counters are per device and not per
- * port, so holder should be device. Therefore skip per port conunter
- * initialization.
+ * port, so holder should be device. Therefore skip per port
+ * counter initialization.
*/
- if (device->ops.alloc_hw_stats && port_num && is_full_dev)
- setup_hw_stats(device, p, port_num);
-
- list_add_tail(&p->kobj.entry, &coredev->port_list);
-
- kobject_uevent(&p->kobj, KOBJ_ADD);
- return 0;
-
-err_remove_pkey:
- if (p->pkey_group)
- sysfs_remove_group(&p->kobj, p->pkey_group);
-
-err_free_pkey:
- if (p->pkey_group) {
- for (i = 0; i < attr.pkey_tbl_len; ++i)
- kfree(p->pkey_group->attrs[i]);
-
- kfree(p->pkey_group->attrs);
- p->pkey_group->attrs = NULL;
+ if (port_num && is_full_dev) {
+ ret = setup_hw_port_stats(p, &p->groups[2]);
+ if (ret && ret != -EOPNOTSUPP)
+ goto err_put;
+ if (!ret)
+ *cur_group++ = &p->groups[2];
}
-err_free_pkey_group:
- kfree(p->pkey_group);
-
-err_remove_gid_type:
- sysfs_remove_group(&p->gid_attr_group->kobj,
- &p->gid_attr_group->type);
-
-err_free_gid_type:
- for (i = 0; i < attr.gid_tbl_len; ++i)
- kfree(p->gid_attr_group->type.attrs[i]);
-
- kfree(p->gid_attr_group->type.attrs);
- p->gid_attr_group->type.attrs = NULL;
-
-err_remove_gid_ndev:
- sysfs_remove_group(&p->gid_attr_group->kobj,
- &p->gid_attr_group->ndev);
-
-err_free_gid_ndev:
- for (i = 0; i < attr.gid_tbl_len; ++i)
- kfree(p->gid_attr_group->ndev.attrs[i]);
+ if (device->ops.process_mad && is_full_dev)
+ *cur_group++ = get_counter_table(device, port_num);
- kfree(p->gid_attr_group->ndev.attrs);
- p->gid_attr_group->ndev.attrs = NULL;
-
-err_remove_gid:
- sysfs_remove_group(&p->kobj, &p->gid_group);
-
-err_free_gid:
- for (i = 0; i < attr.gid_tbl_len; ++i)
- kfree(p->gid_group.attrs[i]);
-
- kfree(p->gid_group.attrs);
- p->gid_group.attrs = NULL;
+ ret = kobject_add(&p->kobj, coredev->ports_kobj, "%d", port_num);
+ if (ret)
+ goto err_put;
+ ret = sysfs_create_groups(&p->kobj, p->groups_list);
+ if (ret)
+ goto err_del;
+ if (is_full_dev) {
+ ret = sysfs_create_groups(&p->kobj, device->ops.port_groups);
+ if (ret)
+ goto err_groups;
+ }
-err_remove_pma:
- if (p->pma_table)
- sysfs_remove_group(&p->kobj, p->pma_table);
+ list_add_tail(&p->kobj.entry, &coredev->port_list);
+ if (device->port_data && is_full_dev)
+ device->port_data[port_num].sysfs = p;
-err_put_gid_attrs:
- kobject_put(&p->gid_attr_group->kobj);
+ return p;
+err_groups:
+ sysfs_remove_groups(&p->kobj, p->groups_list);
+err_del:
+ kobject_del(&p->kobj);
err_put:
kobject_put(&p->kobj);
- return ret;
+ return ERR_PTR(ret);
+}
+
+static void destroy_port(struct ib_core_device *coredev, struct ib_port *port)
+{
+ bool is_full_dev = &port->ibdev->coredev == coredev;
+
+ if (port->ibdev->port_data &&
+ port->ibdev->port_data[port->port_num].sysfs == port)
+ port->ibdev->port_data[port->port_num].sysfs = NULL;
+ list_del(&port->kobj.entry);
+ if (is_full_dev)
+ sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups);
+ sysfs_remove_groups(&port->kobj, port->groups_list);
+ kobject_del(&port->kobj);
+ kobject_put(&port->kobj);
}
static const char *node_type_string(int node_type)
@@ -1259,7 +1298,7 @@ static ssize_t node_type_show(struct device *device,
{
struct ib_device *dev = rdma_device_to_ibdev(device);
- return sysfs_emit(buf, "%d: %s\n", dev->node_type,
+ return sysfs_emit(buf, "%u: %s\n", dev->node_type,
node_type_string(dev->node_type));
}
static DEVICE_ATTR_RO(node_type);
@@ -1347,31 +1386,13 @@ const struct attribute_group ib_dev_attr_group = {
void ib_free_port_attrs(struct ib_core_device *coredev)
{
- struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
- bool is_full_dev = &device->coredev == coredev;
struct kobject *p, *t;
list_for_each_entry_safe(p, t, &coredev->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
- list_del(&p->entry);
- if (port->hw_stats_ag)
- free_hsag(&port->kobj, port->hw_stats_ag);
- kfree(port->hw_stats);
- if (device->port_data && is_full_dev)
- device->port_data[port->port_num].hw_stats = NULL;
-
- if (port->pma_table)
- sysfs_remove_group(p, port->pma_table);
- if (port->pkey_group)
- sysfs_remove_group(p, port->pkey_group);
- sysfs_remove_group(p, &port->gid_group);
- sysfs_remove_group(&port->gid_attr_group->kobj,
- &port->gid_attr_group->ndev);
- sysfs_remove_group(&port->gid_attr_group->kobj,
- &port->gid_attr_group->type);
- kobject_put(&port->gid_attr_group->kobj);
- kobject_put(p);
+ destroy_gid_attrs(port);
+ destroy_port(coredev, port);
}
kobject_put(coredev->ports_kobj);
@@ -1380,7 +1401,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev)
int ib_setup_port_attrs(struct ib_core_device *coredev)
{
struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
- u32 port;
+ u32 port_num;
int ret;
coredev->ports_kobj = kobject_create_and_add("ports",
@@ -1388,12 +1409,24 @@ int ib_setup_port_attrs(struct ib_core_device *coredev)
if (!coredev->ports_kobj)
return -ENOMEM;
- rdma_for_each_port (device, port) {
- ret = add_port(coredev, port);
+ rdma_for_each_port (device, port_num) {
+ struct ib_port_attr attr;
+ struct ib_port *port;
+
+ ret = ib_query_port(device, port_num, &attr);
if (ret)
goto err_put;
- }
+ port = setup_port(coredev, port_num, &attr);
+ if (IS_ERR(port)) {
+ ret = PTR_ERR(port);
+ goto err_put;
+ }
+
+ ret = setup_gid_attrs(port, &attr);
+ if (ret)
+ goto err_put;
+ }
return 0;
err_put:
@@ -1401,70 +1434,27 @@ err_put:
return ret;
}
-int ib_device_register_sysfs(struct ib_device *device)
-{
- int ret;
-
- ret = ib_setup_port_attrs(&device->coredev);
- if (ret)
- return ret;
-
- if (device->ops.alloc_hw_stats)
- setup_hw_stats(device, NULL, 0);
-
- return 0;
-}
-
-void ib_device_unregister_sysfs(struct ib_device *device)
-{
- if (device->hw_stats_ag)
- free_hsag(&device->dev.kobj, device->hw_stats_ag);
- kfree(device->hw_stats);
-
- ib_free_port_attrs(&device->coredev);
-}
-
/**
- * ib_port_register_module_stat - add module counters under relevant port
- * of IB device.
+ * ib_port_register_client_groups - Add an ib_client's attributes to the port
*
- * @device: IB device to add counters
+ * @ibdev: IB device to add counters
* @port_num: valid port number
- * @kobj: pointer to the kobject to initialize
- * @ktype: pointer to the ktype for this kobject.
- * @name: the name of the kobject
+ * @groups: Group list of attributes
+ *
+ * Do not use. Only for legacy sysfs compatibility.
*/
-int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
- struct kobject *kobj, struct kobj_type *ktype,
- const char *name)
+int ib_port_register_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups)
{
- struct kobject *p, *t;
- int ret;
-
- list_for_each_entry_safe(p, t, &device->coredev.port_list, entry) {
- struct ib_port *port = container_of(p, struct ib_port, kobj);
-
- if (port->port_num != port_num)
- continue;
-
- ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
- name);
- if (ret) {
- kobject_put(kobj);
- return ret;
- }
- }
-
- return 0;
+ return sysfs_create_groups(&ibdev->port_data[port_num].sysfs->kobj,
+ groups);
}
-EXPORT_SYMBOL(ib_port_register_module_stat);
+EXPORT_SYMBOL(ib_port_register_client_groups);
-/**
- * ib_port_unregister_module_stat - release module counters
- * @kobj: pointer to the kobject to release
- */
-void ib_port_unregister_module_stat(struct kobject *kobj)
+void ib_port_unregister_client_groups(struct ib_device *ibdev, u32 port_num,
+ const struct attribute_group **groups)
{
- kobject_put(kobj);
+ return sysfs_remove_groups(&ibdev->port_data[port_num].sysfs->kobj,
+ groups);
}
-EXPORT_SYMBOL(ib_port_unregister_module_stat);
+EXPORT_SYMBOL(ib_port_unregister_client_groups);
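
A hypothetical caller of the replacement API, with every name below illustrative rather than taken from this patch; the attributes must be struct ib_port_attribute since they hang off the port kobject and its sysfs_ops:

	static ssize_t hello_show(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "port %u\n", port_num);
	}
	static IB_PORT_ATTR_RO(hello);

	static struct attribute *hello_attrs[] = {
		&ib_port_attr_hello.attr,
		NULL,
	};
	static const struct attribute_group hello_group = {
		.name = "hello_client",
		.attrs = hello_attrs,
	};
	static const struct attribute_group *hello_groups[] = {
		&hello_group,
		NULL,
	};

	/* from the client's add and remove callbacks respectively: */
	err = ib_port_register_client_groups(ibdev, port_num, hello_groups);
	ib_port_unregister_client_groups(ibdev, port_num, hello_groups);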
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 15d57ba4d07a..2b72c4fa9550 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -468,8 +468,8 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- ucma_destroy_private_ctx(ctx);
- return -EFAULT;
+ ret = -EFAULT;
+ goto err1;
}
mutex_lock(&file->mut);
@@ -1830,13 +1830,12 @@ static struct ib_client rdma_cma_client = {
};
MODULE_ALIAS_RDMA_CLIENT("rdma_cm");
-static ssize_t show_abi_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t abi_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static DEVICE_ATTR_RO(abi_version);
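
The rename from show_abi_version to abi_version_show is forced by the macro: at this use site DEVICE_ATTR_RO() expands to roughly

	static struct device_attribute dev_attr_abi_version =
		__ATTR(abi_version, 0444, abi_version_show, NULL);

so the show callback has to be named <attribute>_show for the expansion to resolve.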
static int __init ucma_init(void)
{
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index d65d541b9a25..64d9c492de64 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -479,7 +479,7 @@ int ib_ud_header_unpack(void *buf,
buf += IB_LRH_BYTES;
if (header->lrh.link_version != 0) {
- pr_warn("Invalid LRH.link_version %d\n",
+ pr_warn("Invalid LRH.link_version %u\n",
header->lrh.link_version);
return -EINVAL;
}
@@ -496,7 +496,7 @@ int ib_ud_header_unpack(void *buf,
buf += IB_GRH_BYTES;
if (header->grh.ip_version != 6) {
- pr_warn("Invalid GRH.ip_version %d\n",
+ pr_warn("Invalid GRH.ip_version %u\n",
header->grh.ip_version);
return -EINVAL;
}
@@ -508,7 +508,7 @@ int ib_ud_header_unpack(void *buf,
break;
default:
- pr_warn("Invalid LRH.link_next_header %d\n",
+ pr_warn("Invalid LRH.link_next_header %u\n",
header->lrh.link_next_header);
return -EINVAL;
}
@@ -530,7 +530,7 @@ int ib_ud_header_unpack(void *buf,
}
if (header->bth.transport_header_version != 0) {
- pr_warn("Invalid BTH.transport_header_version %d\n",
+ pr_warn("Invalid BTH.transport_header_version %u\n",
header->bth.transport_header_version);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 323f6cf00682..9462dbe66014 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -445,7 +445,7 @@ retry:
if (hmm_order + PAGE_SHIFT < page_shift) {
ret = -EINVAL;
ibdev_dbg(umem_odp->umem.ibdev,
- "%s: un-expected hmm_order %d, page_shift %d\n",
+ "%s: un-expected hmm_order %u, page_shift %u\n",
__func__, hmm_order, page_shift);
break;
}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 852efedda798..98cb594cd9a6 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -700,7 +700,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(&file->port->dev,
- "%s: invalid QPN %d specified\n", __func__,
+ "%s: invalid QPN %u specified\n", __func__,
ureq.qpn);
ret = -EINVAL;
goto out;
@@ -800,7 +800,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
- dev_notice(&file->port->dev, "%s: invalid QPN %d specified\n",
+ dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n",
__func__, ureq.qpn);
ret = -EINVAL;
goto out;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 53a10479958b..821d93c8f712 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -97,7 +97,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
*/
struct ib_uverbs_device {
- atomic_t refcount;
+ refcount_t refcount;
u32 num_comp_vectors;
struct completion comp;
struct device dev;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 64e4be1cbec7..8c8ca7bce3ca 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3034,12 +3034,29 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
if (!wq)
return -EINVAL;
- wq_attr.curr_wq_state = cmd.curr_wq_state;
- wq_attr.wq_state = cmd.wq_state;
if (cmd.attr_mask & IB_WQ_FLAGS) {
wq_attr.flags = cmd.flags;
wq_attr.flags_mask = cmd.flags_mask;
}
+
+ if (cmd.attr_mask & IB_WQ_CUR_STATE) {
+ if (cmd.curr_wq_state > IB_WQS_ERR)
+ return -EINVAL;
+
+ wq_attr.curr_wq_state = cmd.curr_wq_state;
+ } else {
+ wq_attr.curr_wq_state = wq->state;
+ }
+
+ if (cmd.attr_mask & IB_WQ_STATE) {
+ if (cmd.wq_state > IB_WQS_ERR)
+ return -EINVAL;
+
+ wq_attr.wq_state = cmd.wq_state;
+ } else {
+ wq_attr.wq_state = wq_attr.curr_wq_state;
+ }
+
ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
&attrs->driver_udata);
rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
@@ -3302,7 +3319,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
- pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
+ pr_warn("create flow failed, flow %d: %u bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
err = -EINVAL;
goto err_free;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index f173ecd102dc..d54434088727 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -197,7 +197,7 @@ void ib_uverbs_release_file(struct kref *ref)
module_put(ib_dev->ops.owner);
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- if (atomic_dec_and_test(&file->device->refcount))
+ if (refcount_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
if (file->default_async_file)
@@ -891,7 +891,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
int srcu_key;
dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
- if (!atomic_inc_not_zero(&dev->refcount))
+ if (!refcount_inc_not_zero(&dev->refcount))
return -ENXIO;
get_device(&dev->dev);
@@ -955,7 +955,7 @@ err_module:
err:
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
- if (atomic_dec_and_test(&dev->refcount))
+ if (refcount_dec_and_test(&dev->refcount))
ib_uverbs_comp_dev(dev);
put_device(&dev->dev);
@@ -1124,7 +1124,7 @@ static int ib_uverbs_add_one(struct ib_device *device)
uverbs_dev->dev.release = ib_uverbs_release_dev;
uverbs_dev->groups[0] = &dev_attr_group;
uverbs_dev->dev.groups = uverbs_dev->groups;
- atomic_set(&uverbs_dev->refcount, 1);
+ refcount_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
@@ -1166,7 +1166,7 @@ static int ib_uverbs_add_one(struct ib_device *device)
err_uapi:
ida_free(&uverbs_ida, devnum);
err:
- if (atomic_dec_and_test(&uverbs_dev->refcount))
+ if (refcount_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
put_device(&uverbs_dev->dev);
@@ -1229,7 +1229,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
wait_clients = 0;
}
- if (atomic_dec_and_test(&uverbs_dev->refcount))
+ if (refcount_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
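
refcount_t accepts the same call patterns the atomic_t code used, but saturates instead of wrapping and WARNs on increment-from-zero or a double put; the semantics this conversion relies on:

	refcount_set(&uverbs_dev->refcount, 1);	/* initial reference */
	if (!refcount_inc_not_zero(&uverbs_dev->refcount))
		return -ENXIO;			/* object already dying */
	if (refcount_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);	/* this was the last put */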
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 62f5bcb712cf..2f2c7646fce1 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -517,7 +517,7 @@ static void uapi_key_okay(u32 key)
count++;
if (uapi_key_is_attr(key))
count++;
- WARN(count != 1, "Bad count %d key=%x", count, key);
+ WARN(count != 1, "Bad count %u key=%x", count, key);
}
static void uapi_finalize_disable(struct uverbs_api *uapi)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 2b0798151fb7..7036967e4c0b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1834,7 +1834,7 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
- pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
+ pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
netdev_speed);
}
@@ -2445,27 +2445,6 @@ int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
}
EXPORT_SYMBOL(ib_destroy_wq_user);
-/**
- * ib_modify_wq - Modifies the specified WQ.
- * @wq: The WQ to modify.
- * @wq_attr: On input, specifies the WQ attributes to modify.
- * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
- * are being modified.
- * On output, the current values of selected WQ attributes are returned.
- */
-int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
- u32 wq_attr_mask)
-{
- int err;
-
- if (!wq->device->ops.modify_wq)
- return -EOPNOTSUPP;
-
- err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
- return err;
-}
-EXPORT_SYMBOL(ib_modify_wq);
-
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 0aeccd984889..fba0b3be903e 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
-obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3e54e1ae75b4..7ba07797845c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -234,13 +234,10 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
return ARRAY_SIZE(bnxt_re_stat_name);
}
-struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
- /* We support only per port stats */
- if (!port_num)
- return NULL;
return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
ARRAY_SIZE(bnxt_re_stat_name),
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index ede048607d6c..6f2d2f91d9ff 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -96,8 +96,8 @@ enum bnxt_re_hw_stats {
BNXT_RE_NUM_COUNTERS
};
-struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num);
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2efaa80bfbd2..283b6b81563c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -163,6 +163,10 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
ib_attr->atomic_cap = IB_ATOMIC_NONE;
ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_GLOB;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ }
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -1098,10 +1102,6 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
struct bnxt_re_srq *srq;
srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
- if (!srq) {
- ibdev_err(&rdev->ibdev, "SRQ not found");
- return -EINVAL;
- }
qplqp->srq = &srq->qplib_srq;
rq->max_wqe = 0;
} else {
@@ -1279,22 +1279,12 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
/* Setup CQs */
if (init_attr->send_cq) {
cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
- if (!cq) {
- ibdev_err(&rdev->ibdev, "Send CQ not found");
- rc = -EINVAL;
- goto out;
- }
qplqp->scq = &cq->qplib_cq;
qp->scq = cq;
}
if (init_attr->recv_cq) {
cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
- if (!cq) {
- ibdev_err(&rdev->ibdev, "Receive CQ not found");
- rc = -EINVAL;
- goto out;
- }
qplqp->rcq = &cq->qplib_cq;
qp->rcq = cq;
}
@@ -3473,10 +3463,6 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
((struct bnxt_qplib_qp *)
(unsigned long)(cqe->qp_handle),
struct bnxt_re_qp, qplib_qp);
- if (!qp) {
- ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
- continue;
- }
wc->qp = &qp->ib_qp;
wc->ex.imm_data = cqe->immdata;
wc->src_qp = cqe->src_qp;
@@ -3858,7 +3844,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_uctx_resp resp;
+ struct bnxt_re_uctx_resp resp = {};
u32 chip_met_rev_num = 0;
int rc;
@@ -3886,15 +3872,15 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
resp.chip_id0 = chip_met_rev_num;
- /* Future extension of chip info */
- resp.chip_id1 = 0;
/*Temp, Use xa_alloc instead */
resp.dev_id = rdev->en_dev->pdev->devfn;
resp.max_qp = rdev->qplib_ctx.qpc_count;
resp.pg_size = PAGE_SIZE;
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
- resp.rsvd = 0;
+
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 8bfbf0231a9e..d5674026512a 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -128,6 +128,9 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->rcfw.res = &rdev->qplib_res;
bnxt_re_set_drv_mode(rdev, wqe_mode);
+ if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ ibdev_info(&rdev->ibdev,
+ "platform doesn't support global atomics.");
return 0;
}
@@ -662,7 +665,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.uverbs_abi_ver = BNXT_RE_ABI_VERSION,
.add_gid = bnxt_re_add_gid,
- .alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
+ .alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
.alloc_mr = bnxt_re_alloc_mr,
.alloc_pd = bnxt_re_alloc_pd,
.alloc_ucontext = bnxt_re_alloc_ucontext,
@@ -680,6 +683,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.destroy_cq = bnxt_re_destroy_cq,
.destroy_qp = bnxt_re_destroy_qp,
.destroy_srq = bnxt_re_destroy_srq,
+ .device_group = &bnxt_re_dev_attr_group,
.get_dev_fw_str = bnxt_re_query_fw_str,
.get_dma_mr = bnxt_re_get_dma_mr,
.get_hw_stats = bnxt_re_ib_get_hw_stats,
@@ -726,7 +730,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
- rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
if (ret)
@@ -885,12 +888,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
struct ib_event ib_event;
int rc = 0;
- if (!srq) {
- ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
- ROCE_DRV_MODULE_NAME);
- rc = -EINVAL;
- goto done;
- }
ib_event.device = &srq->rdev->ibdev;
ib_event.element.srq = &srq->ib_srq;
if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
@@ -903,7 +900,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
(*srq->ib_srq.event_handler)(&ib_event,
srq->ib_srq.srq_context);
}
-done:
return rc;
}
@@ -913,11 +909,6 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
- if (!cq) {
- ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
- ROCE_DRV_MODULE_NAME);
- return -EINVAL;
- }
if (cq->ib_cq.comp_handler) {
/* Lock comp_handler? */
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f50784405e27..037501952543 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,6 +39,8 @@
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
+#include <rdma/bnxt_re-abi.h>
+
/* Few helper structures temporarily defined here
* should get rid of these when roce_hsi.h is updated
* in original code base
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 3ca47004b752..17f0701b3cee 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -959,3 +959,20 @@ fail:
bnxt_qplib_free_res(res);
return rc;
}
+
+int bnxt_qplib_determine_atomics(struct pci_dev *dev)
+{
+ int comp;
+ u16 ctl2;
+
+ comp = pci_enable_atomic_ops_to_root(dev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32);
+ if (comp)
+ return -EOPNOTSUPP;
+ comp = pci_enable_atomic_ops_to_root(dev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ if (comp)
+ return -EOPNOTSUPP;
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
+ return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
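
pci_enable_atomic_ops_to_root() is the core PCI helper doing the heavy lifting here: it checks that AtomicOps can be routed along the path to the root port, that the root port advertises the requested completer capability, and then sets PCI_EXP_DEVCTL2_ATOMIC_REQ on the endpoint, returning 0 on success. The trailing readback double-checks that the enable bit actually stuck:

	/* return convention, as consumed by bnxt_re_setup_chip_ctx():
	 *   0           - 32- and 64-bit AtomicOp completion enabled
	 *   -EOPNOTSUPP - a completer capability is missing on the path
	 *   1           - DEVCTL2.ATOMIC_REQ did not stick after enabling
	 */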
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 7a1ab38b95da..c291f495ae91 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -45,12 +45,6 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
-enum bnxt_qplib_wqe_mode {
- BNXT_QPLIB_WQE_MODE_STATIC = 0x00,
- BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01,
- BNXT_QPLIB_WQE_MODE_INVALID = 0x02
-};
-
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
/* Other modes to follow here */
@@ -373,6 +367,7 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5);
+int bnxt_qplib_determine_atomics(struct pci_dev *dev);
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_hwq *hwq, u32 cnt)
{
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 049b3576302b..3d9259632eb3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -54,6 +54,17 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
/* Device */
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ u16 pcie_ctl2 = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ return false;
+
+ pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
+ return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
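
bnxt_qplib_is_atomic_cap() reads back the bit that bnxt_qplib_determine_atomics() set at probe time, and only trusts it on Gen-P5 parts; a sketch of the resulting flow:

	/* probe:  bnxt_qplib_determine_atomics() sets DEVCTL2.ATOMIC_REQ
	 * query:  bnxt_qplib_is_atomic_cap() reads it back, Gen-P5 only
	 * result: attr->is_atomic is true exactly when capable hardware
	 *         sits on a fabric where the enable succeeded */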
+
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
char *fw_ver)
{
@@ -162,7 +173,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
- attr->is_atomic = false;
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index bc228340684f..260104783691 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,8 +42,6 @@
#define BNXT_QPLIB_RESERVED_QP_WRS 128
-#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
-
struct bnxt_qplib_dev_attr {
#define FW_VER_ARR_LEN 4
u8 fw_ver[FW_VER_ARR_LEN];
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 44c2416588d4..6c8c910f4e86 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -976,8 +976,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
chp = to_c4iw_cq(ib_cq);
xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
+ refcount_dec(&chp->refcnt);
+ wait_event(chp->wait, !refcount_read(&chp->refcnt));
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
@@ -1080,7 +1080,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chp->ibcq.cqe = entries - 2;
spin_lock_init(&chp->lock);
spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
+ refcount_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 4cd877bd2f56..7798d090888b 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -151,7 +151,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
}
c4iw_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
+ refcount_inc(&chp->refcnt);
xa_unlock_irq(&dev->qps);
/* Bad incoming write */
@@ -213,7 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break;
}
done:
- if (atomic_dec_and_test(&chp->refcnt))
+ if (refcount_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
c4iw_qp_rem_ref(&qhp->ibqp);
out:
@@ -228,13 +228,13 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
xa_lock_irqsave(&dev->cqs, flag);
chp = xa_load(&dev->cqs, qid);
if (chp) {
- atomic_inc(&chp->refcnt);
+ refcount_inc(&chp->refcnt);
xa_unlock_irqrestore(&dev->cqs, flag);
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- if (atomic_dec_and_test(&chp->refcnt))
+ if (refcount_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
pr_debug("unknown cqid 0x%x\n", qid);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cdec5deb37a1..3883af3d2312 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -427,7 +427,7 @@ struct c4iw_cq {
struct t4_cq cq;
spinlock_t lock;
spinlock_t comp_handler_lock;
- atomic_t refcnt;
+ refcount_t refcnt;
wait_queue_head_t wait;
struct c4iw_wr_wait *wr_waitp;
};
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 3f1893e180dd..881d515eb15a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -377,14 +377,11 @@ static const char * const names[] = {
[IP6OUTRSTS] = "ip6OutRsts"
};
-static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
- u32 port_num)
+static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev)
{
BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
- if (port_num != 0)
- return NULL;
-
+ /* FIXME: these look like port stats */
return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -455,7 +452,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
.driver_id = RDMA_DRIVER_CXGB4,
.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,
- .alloc_hw_stats = c4iw_alloc_stats,
+ .alloc_hw_device_stats = c4iw_alloc_device_stats,
.alloc_mr = c4iw_alloc_mr,
.alloc_pd = c4iw_allocate_pd,
.alloc_ucontext = c4iw_alloc_ucontext,
@@ -468,6 +465,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
.destroy_cq = c4iw_destroy_cq,
.destroy_qp = c4iw_destroy_qp,
.destroy_srq = c4iw_destroy_srq,
+ .device_group = &c4iw_attr_group,
.fill_res_cq_entry = c4iw_fill_res_cq_entry,
.fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
.fill_res_mr_entry = c4iw_fill_res_mr_entry,
@@ -542,7 +540,6 @@ void c4iw_register_device(struct work_struct *work)
memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iw_ifname));
- rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
ret = set_netdevs(&dev->ibdev, &dev->rdev);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d109bb3822a5..a81fa7a56edb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ ret = -EINVAL;
goto free_dma;
}
@@ -1963,7 +1964,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
- abort = 1;
disconnect = 1;
ep = qhp->ep;
c4iw_get_ep(&qhp->ep->com);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index ea322cec27d2..2b8ca099b381 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -157,7 +157,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
u32 port_num);
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev);
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 816cfd65b7ac..203e6ddcacbc 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -242,7 +242,8 @@ static const struct ib_device_ops efa_dev_ops = {
.driver_id = RDMA_DRIVER_EFA,
.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
- .alloc_hw_stats = efa_alloc_hw_stats,
+ .alloc_hw_port_stats = efa_alloc_hw_port_stats,
+ .alloc_hw_device_stats = efa_alloc_hw_device_stats,
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
.create_cq = efa_create_cq,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 51572f1dc611..be6d3ff0f1be 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1904,13 +1904,22 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
return 0;
}
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num)
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
{
return rdma_alloc_hw_stats_struct(efa_stats_names,
ARRAY_SIZE(efa_stats_names),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
+struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ /*
+ * It is probably a bug that efa reports its port stats as device
+ * stats
+ */
+ return efa_alloc_hw_port_stats(ibdev, 0);
+}
+
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index)
{
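
efa previously keyed device-wide stats off a magic port number in a single alloc_hw_stats() callback; the split ops make the distinction explicit, and the comment in the new device-stats helper records that efa is really exposing port counters there. A sketch of a driver wiring the port-stats side (counter names are hypothetical):

    static const char * const demo_stat_names[] = {
            "demo_tx_pkts",
            "demo_rx_pkts",
    };

    static struct rdma_hw_stats *demo_alloc_port_stats(struct ib_device *ibdev,
                                                       u32 port_num)
    {
            return rdma_alloc_hw_stats_struct(demo_stat_names,
                                              ARRAY_SIZE(demo_stat_names),
                                              RDMA_HW_STATS_DEFAULT_LIFESPAN);
    }

    static const struct ib_device_ops demo_stats_ops = {
            .alloc_hw_port_stats = demo_alloc_port_stats,
            /* .alloc_hw_device_stats takes only (struct ib_device *) */
    };
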
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 5eeae8df415b..c97544638367 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14186,7 +14186,7 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
}
/**
- * hfi1_get_qp_map
+ * hfi1_get_qp_map - get qp map
* @dd: device data
* @idx: index to read
*/
@@ -14199,7 +14199,7 @@ u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
}
/**
- * init_qpmap_table
+ * init_qpmap_table - init qp map
* @dd: device data
* @first_ctxt: first context
* @last_ctxt: last context
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 3b7bbc7b9d10..955c3637980e 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -736,7 +736,7 @@ static u64 kvirt_to_phys(void *addr)
}
/**
- * complete_subctxt
+ * complete_subctxt - complete sub-context info
* @fd: valid filedata pointer
*
* Sub-context info can only be set up after the base context
@@ -841,7 +841,7 @@ static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
}
/**
- * match_ctxt
+ * match_ctxt - match context
* @fd: valid filedata pointer
* @uinfo: user info to compare base context with
* @uctxt: context to compare uinfo to.
@@ -898,7 +898,7 @@ static int match_ctxt(struct hfi1_filedata *fd,
}
/**
- * find_sub_ctxt
+ * find_sub_ctxt - find sub-context
* @fd: valid filedata pointer
* @uinfo: matching info to use to find a possible context to share.
*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 867ae0b1aa95..31664f43c27f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -772,10 +772,6 @@ struct hfi1_pportdata {
struct hfi1_ibport ibport_data;
struct hfi1_devdata *dd;
- struct kobject pport_cc_kobj;
- struct kobject sc2vl_kobj;
- struct kobject sl2sc_kobj;
- struct kobject vl2mtu_kobj;
/* PHY support */
struct qsfp_data qsfp_info;
@@ -1764,7 +1760,7 @@ static inline void pause_for_credit_return(struct hfi1_devdata *dd)
}
/**
- * sc_to_vlt() reverse lookup sc to vl
+ * sc_to_vlt() - reverse lookup sc to vl
* @dd: devdata
* @sc5: 5 bit sc
*/
@@ -2188,12 +2184,11 @@ static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
extern const char ib_hfi1_version[];
extern const struct attribute_group ib_hfi1_attr_group;
+extern const struct attribute_group *hfi1_attr_port_groups[];
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
-int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
- struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3a8a420c045..0986aa065418 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -312,7 +312,7 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
}
/**
- * hfi1_rcd_get_by_index
+ * hfi1_rcd_get_by_index - get by index
* @dd: pointer to a valid devdata structure
* @ctxt: the index of a possible rcd
*
@@ -499,7 +499,7 @@ bail:
}
/**
- * hfi1_free_ctxt
+ * hfi1_free_ctxt - free context
* @rcd: pointer to an initialized rcd data structure
*
* This wrapper is the free function that matches hfi1_create_ctxtdata().
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index ff864f6f0266..e276522104c6 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -993,7 +993,7 @@ static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
}
/**
- * sc_wait_for_packet_egress
+ * sc_wait_for_packet_egress - wait for packet
* @sc: valid send context
* @pause: wait for credit return
*
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 0102262343c0..9e5f08d2b985 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -279,7 +279,6 @@ int init_credit_return(struct hfi1_devdata *dd);
void free_credit_return(struct hfi1_devdata *dd);
int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
int init_send_contexts(struct hfi1_devdata *dd);
-int init_credit_return(struct hfi1_devdata *dd);
int init_pervl_scs(struct hfi1_devdata *dd);
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
uint hdrqentsize, int numa);
@@ -294,7 +293,6 @@ void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
void sc_release_update(struct send_context *sc);
-void sc_return_credits(struct send_context *sc);
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
void sc_add_credit_return_intr(struct send_context *sc);
void sc_del_credit_return_intr(struct send_context *sc);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 1fcc6e9666e0..eb15c310d63d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3130,7 +3130,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
}
if (type == SDMA_MAP_PAGE) {
- kvaddr = kmap(page);
+ kvaddr = kmap_local_page(page);
kvaddr += offset;
} else if (WARN_ON(!kvaddr)) {
__sdma_txclean(dd, tx);
@@ -3140,7 +3140,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
tx->coalesce_idx += len;
if (type == SDMA_MAP_PAGE)
- kunmap(page);
+ kunmap_local(kvaddr);
/* If there is more data, return */
if (tx->tlen - tx->coalesce_idx)
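
kmap() consumes a global, system-wide mapping slot and may sleep; kmap_local_page() creates a cheap CPU-local mapping that is valid only on the current thread and must be unmapped in stack order, which is exactly what this short memcpy window needs. The pattern, sketched (helper name hypothetical):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void demo_copy_from_page(void *dst, struct page *page,
                                    unsigned int offset, unsigned int len)
    {
            void *kvaddr = kmap_local_page(page);

            memcpy(dst, kvaddr + offset, len);
            kunmap_local(kvaddr);   /* kunmap_local() accepts any address
                                     * inside the mapped page, which is why
                                     * the hunk above can legally pass the
                                     * offset-adjusted pointer */
    }
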
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index eaf441ece25e..acfcbedebe0d 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -45,11 +45,21 @@
*
*/
#include <linux/ctype.h>
+#include <rdma/ib_sysfs.h>
#include "hfi.h"
#include "mad.h"
#include "trace.h"
+static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
+{
+ u32 port_num;
+ struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return &dd->pport[port_num - 1];
+}
+
/*
* Start of per-port congestion control structures and support code
*/
@@ -57,13 +67,12 @@
/*
* Congestion control table size followed by table entries
*/
-static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
{
int ret;
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
+ struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
struct cc_state *cc_state;
ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
@@ -89,30 +98,19 @@ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
return count;
}
-
-static void port_release(struct kobject *kobj)
-{
- /* nothing to do since memory is freed by hfi1_free_devdata() */
-}
-
-static const struct bin_attribute cc_table_bin_attr = {
- .attr = {.name = "cc_table_bin", .mode = 0444},
- .read = read_cc_table_bin,
- .size = PAGE_SIZE,
-};
+static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
* entries for the congestion entries - increase, timer, event log
* trigger threshold and the minimum injection rate delay.
*/
-static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
+static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
+ struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
int ret;
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
struct cc_state *cc_state;
ret = sizeof(struct opa_congestion_setting_attr_shadow);
@@ -136,27 +134,30 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static const struct bin_attribute cc_setting_bin_attr = {
- .attr = {.name = "cc_settings_bin", .mode = 0444},
- .read = read_cc_setting_bin,
- .size = PAGE_SIZE,
-};
-
-struct hfi1_port_attr {
- struct attribute attr;
- ssize_t (*show)(struct hfi1_pportdata *, char *);
- ssize_t (*store)(struct hfi1_pportdata *, const char *, size_t);
+static struct bin_attribute *port_cc_bin_attributes[] = {
+ &bin_attr_cc_setting_bin,
+ &bin_attr_cc_table_bin,
+ NULL
};
-static ssize_t cc_prescan_show(struct hfi1_pportdata *ppd, char *buf)
+static ssize_t cc_prescan_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+
return sysfs_emit(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
}
-static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
+static ssize_t cc_prescan_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
size_t count)
{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+
if (!memcmp(buf, "on", 2))
ppd->cc_prescan = true;
else if (!memcmp(buf, "off", 3))
@@ -164,60 +165,41 @@ static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
return count;
}
+static IB_PORT_ATTR_ADMIN_RW(cc_prescan);
-static struct hfi1_port_attr cc_prescan_attr =
- __ATTR(cc_prescan, 0600, cc_prescan_show, cc_prescan_store);
-
-static ssize_t cc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_port_attr *port_attr =
- container_of(attr, struct hfi1_port_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
-
- return port_attr->show(ppd, buf);
-}
-
-static ssize_t cc_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct hfi1_port_attr *port_attr =
- container_of(attr, struct hfi1_port_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
-
- return port_attr->store(ppd, buf, count);
-}
-
-static const struct sysfs_ops port_cc_sysfs_ops = {
- .show = cc_attr_show,
- .store = cc_attr_store
-};
-
-static struct attribute *port_cc_default_attributes[] = {
- &cc_prescan_attr.attr,
+static struct attribute *port_cc_attributes[] = {
+ &ib_port_attr_cc_prescan.attr,
NULL
};
-static struct kobj_type port_cc_ktype = {
- .release = port_release,
- .sysfs_ops = &port_cc_sysfs_ops,
- .default_attrs = port_cc_default_attributes
+static const struct attribute_group port_cc_group = {
+ .name = "CCMgtA",
+ .attrs = port_cc_attributes,
+ .bin_attrs = port_cc_bin_attributes,
};
/* Start sc2vl */
-#define HFI1_SC2VL_ATTR(N) \
- static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sc = N \
- }
-
struct hfi1_sc2vl_attr {
- struct attribute attr;
+ struct ib_port_attribute attr;
int sc;
};
+static ssize_t sc2vl_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_sc2vl_attr *sattr =
+ container_of(attr, struct hfi1_sc2vl_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return sysfs_emit(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
+}
+
+#define HFI1_SC2VL_ATTR(N) \
+ static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
+ .attr = __ATTR(N, 0444, sc2vl_attr_show, NULL), \
+ .sc = N, \
+ }
+
HFI1_SC2VL_ATTR(0);
HFI1_SC2VL_ATTR(1);
HFI1_SC2VL_ATTR(2);
@@ -251,78 +233,70 @@ HFI1_SC2VL_ATTR(29);
HFI1_SC2VL_ATTR(30);
HFI1_SC2VL_ATTR(31);
-static struct attribute *sc2vl_default_attributes[] = {
- &hfi1_sc2vl_attr_0.attr,
- &hfi1_sc2vl_attr_1.attr,
- &hfi1_sc2vl_attr_2.attr,
- &hfi1_sc2vl_attr_3.attr,
- &hfi1_sc2vl_attr_4.attr,
- &hfi1_sc2vl_attr_5.attr,
- &hfi1_sc2vl_attr_6.attr,
- &hfi1_sc2vl_attr_7.attr,
- &hfi1_sc2vl_attr_8.attr,
- &hfi1_sc2vl_attr_9.attr,
- &hfi1_sc2vl_attr_10.attr,
- &hfi1_sc2vl_attr_11.attr,
- &hfi1_sc2vl_attr_12.attr,
- &hfi1_sc2vl_attr_13.attr,
- &hfi1_sc2vl_attr_14.attr,
- &hfi1_sc2vl_attr_15.attr,
- &hfi1_sc2vl_attr_16.attr,
- &hfi1_sc2vl_attr_17.attr,
- &hfi1_sc2vl_attr_18.attr,
- &hfi1_sc2vl_attr_19.attr,
- &hfi1_sc2vl_attr_20.attr,
- &hfi1_sc2vl_attr_21.attr,
- &hfi1_sc2vl_attr_22.attr,
- &hfi1_sc2vl_attr_23.attr,
- &hfi1_sc2vl_attr_24.attr,
- &hfi1_sc2vl_attr_25.attr,
- &hfi1_sc2vl_attr_26.attr,
- &hfi1_sc2vl_attr_27.attr,
- &hfi1_sc2vl_attr_28.attr,
- &hfi1_sc2vl_attr_29.attr,
- &hfi1_sc2vl_attr_30.attr,
- &hfi1_sc2vl_attr_31.attr,
+static struct attribute *port_sc2vl_attributes[] = {
+ &hfi1_sc2vl_attr_0.attr.attr,
+ &hfi1_sc2vl_attr_1.attr.attr,
+ &hfi1_sc2vl_attr_2.attr.attr,
+ &hfi1_sc2vl_attr_3.attr.attr,
+ &hfi1_sc2vl_attr_4.attr.attr,
+ &hfi1_sc2vl_attr_5.attr.attr,
+ &hfi1_sc2vl_attr_6.attr.attr,
+ &hfi1_sc2vl_attr_7.attr.attr,
+ &hfi1_sc2vl_attr_8.attr.attr,
+ &hfi1_sc2vl_attr_9.attr.attr,
+ &hfi1_sc2vl_attr_10.attr.attr,
+ &hfi1_sc2vl_attr_11.attr.attr,
+ &hfi1_sc2vl_attr_12.attr.attr,
+ &hfi1_sc2vl_attr_13.attr.attr,
+ &hfi1_sc2vl_attr_14.attr.attr,
+ &hfi1_sc2vl_attr_15.attr.attr,
+ &hfi1_sc2vl_attr_16.attr.attr,
+ &hfi1_sc2vl_attr_17.attr.attr,
+ &hfi1_sc2vl_attr_18.attr.attr,
+ &hfi1_sc2vl_attr_19.attr.attr,
+ &hfi1_sc2vl_attr_20.attr.attr,
+ &hfi1_sc2vl_attr_21.attr.attr,
+ &hfi1_sc2vl_attr_22.attr.attr,
+ &hfi1_sc2vl_attr_23.attr.attr,
+ &hfi1_sc2vl_attr_24.attr.attr,
+ &hfi1_sc2vl_attr_25.attr.attr,
+ &hfi1_sc2vl_attr_26.attr.attr,
+ &hfi1_sc2vl_attr_27.attr.attr,
+ &hfi1_sc2vl_attr_28.attr.attr,
+ &hfi1_sc2vl_attr_29.attr.attr,
+ &hfi1_sc2vl_attr_30.attr.attr,
+ &hfi1_sc2vl_attr_31.attr.attr,
NULL
};
-static ssize_t sc2vl_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_sc2vl_attr *sattr =
- container_of(attr, struct hfi1_sc2vl_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, sc2vl_kobj);
- struct hfi1_devdata *dd = ppd->dd;
-
- return sysfs_emit(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
-}
-
-static const struct sysfs_ops hfi1_sc2vl_ops = {
- .show = sc2vl_attr_show,
-};
-
-static struct kobj_type hfi1_sc2vl_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_sc2vl_ops,
- .default_attrs = sc2vl_default_attributes
+static const struct attribute_group port_sc2vl_group = {
+ .name = "sc2vl",
+ .attrs = port_sc2vl_attributes,
};
-
/* End sc2vl */
/* Start sl2sc */
-#define HFI1_SL2SC_ATTR(N) \
- static struct hfi1_sl2sc_attr hfi1_sl2sc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sl = N \
- }
-
struct hfi1_sl2sc_attr {
- struct attribute attr;
+ struct ib_port_attribute attr;
int sl;
};
+static ssize_t sl2sc_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_sl2sc_attr *sattr =
+ container_of(attr, struct hfi1_sl2sc_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
+
+ return sysfs_emit(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
+}
+
+#define HFI1_SL2SC_ATTR(N) \
+ static struct hfi1_sl2sc_attr hfi1_sl2sc_attr_##N = { \
+ .attr = __ATTR(N, 0444, sl2sc_attr_show, NULL), .sl = N \
+ }
+
HFI1_SL2SC_ATTR(0);
HFI1_SL2SC_ATTR(1);
HFI1_SL2SC_ATTR(2);
@@ -356,79 +330,72 @@ HFI1_SL2SC_ATTR(29);
HFI1_SL2SC_ATTR(30);
HFI1_SL2SC_ATTR(31);
-static struct attribute *sl2sc_default_attributes[] = {
- &hfi1_sl2sc_attr_0.attr,
- &hfi1_sl2sc_attr_1.attr,
- &hfi1_sl2sc_attr_2.attr,
- &hfi1_sl2sc_attr_3.attr,
- &hfi1_sl2sc_attr_4.attr,
- &hfi1_sl2sc_attr_5.attr,
- &hfi1_sl2sc_attr_6.attr,
- &hfi1_sl2sc_attr_7.attr,
- &hfi1_sl2sc_attr_8.attr,
- &hfi1_sl2sc_attr_9.attr,
- &hfi1_sl2sc_attr_10.attr,
- &hfi1_sl2sc_attr_11.attr,
- &hfi1_sl2sc_attr_12.attr,
- &hfi1_sl2sc_attr_13.attr,
- &hfi1_sl2sc_attr_14.attr,
- &hfi1_sl2sc_attr_15.attr,
- &hfi1_sl2sc_attr_16.attr,
- &hfi1_sl2sc_attr_17.attr,
- &hfi1_sl2sc_attr_18.attr,
- &hfi1_sl2sc_attr_19.attr,
- &hfi1_sl2sc_attr_20.attr,
- &hfi1_sl2sc_attr_21.attr,
- &hfi1_sl2sc_attr_22.attr,
- &hfi1_sl2sc_attr_23.attr,
- &hfi1_sl2sc_attr_24.attr,
- &hfi1_sl2sc_attr_25.attr,
- &hfi1_sl2sc_attr_26.attr,
- &hfi1_sl2sc_attr_27.attr,
- &hfi1_sl2sc_attr_28.attr,
- &hfi1_sl2sc_attr_29.attr,
- &hfi1_sl2sc_attr_30.attr,
- &hfi1_sl2sc_attr_31.attr,
+static struct attribute *port_sl2sc_attributes[] = {
+ &hfi1_sl2sc_attr_0.attr.attr,
+ &hfi1_sl2sc_attr_1.attr.attr,
+ &hfi1_sl2sc_attr_2.attr.attr,
+ &hfi1_sl2sc_attr_3.attr.attr,
+ &hfi1_sl2sc_attr_4.attr.attr,
+ &hfi1_sl2sc_attr_5.attr.attr,
+ &hfi1_sl2sc_attr_6.attr.attr,
+ &hfi1_sl2sc_attr_7.attr.attr,
+ &hfi1_sl2sc_attr_8.attr.attr,
+ &hfi1_sl2sc_attr_9.attr.attr,
+ &hfi1_sl2sc_attr_10.attr.attr,
+ &hfi1_sl2sc_attr_11.attr.attr,
+ &hfi1_sl2sc_attr_12.attr.attr,
+ &hfi1_sl2sc_attr_13.attr.attr,
+ &hfi1_sl2sc_attr_14.attr.attr,
+ &hfi1_sl2sc_attr_15.attr.attr,
+ &hfi1_sl2sc_attr_16.attr.attr,
+ &hfi1_sl2sc_attr_17.attr.attr,
+ &hfi1_sl2sc_attr_18.attr.attr,
+ &hfi1_sl2sc_attr_19.attr.attr,
+ &hfi1_sl2sc_attr_20.attr.attr,
+ &hfi1_sl2sc_attr_21.attr.attr,
+ &hfi1_sl2sc_attr_22.attr.attr,
+ &hfi1_sl2sc_attr_23.attr.attr,
+ &hfi1_sl2sc_attr_24.attr.attr,
+ &hfi1_sl2sc_attr_25.attr.attr,
+ &hfi1_sl2sc_attr_26.attr.attr,
+ &hfi1_sl2sc_attr_27.attr.attr,
+ &hfi1_sl2sc_attr_28.attr.attr,
+ &hfi1_sl2sc_attr_29.attr.attr,
+ &hfi1_sl2sc_attr_30.attr.attr,
+ &hfi1_sl2sc_attr_31.attr.attr,
NULL
};
-static ssize_t sl2sc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_sl2sc_attr *sattr =
- container_of(attr, struct hfi1_sl2sc_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, sl2sc_kobj);
- struct hfi1_ibport *ibp = &ppd->ibport_data;
-
- return sysfs_emit(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
-}
-
-static const struct sysfs_ops hfi1_sl2sc_ops = {
- .show = sl2sc_attr_show,
-};
-
-static struct kobj_type hfi1_sl2sc_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_sl2sc_ops,
- .default_attrs = sl2sc_default_attributes
+static const struct attribute_group port_sl2sc_group = {
+ .name = "sl2sc",
+ .attrs = port_sl2sc_attributes,
};
/* End sl2sc */
/* Start vl2mtu */
-#define HFI1_VL2MTU_ATTR(N) \
- static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .vl = N \
- }
-
struct hfi1_vl2mtu_attr {
- struct attribute attr;
+ struct ib_port_attribute attr;
int vl;
};
+static ssize_t vl2mtu_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct hfi1_vl2mtu_attr *vlattr =
+ container_of(attr, struct hfi1_vl2mtu_attr, attr);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ return sysfs_emit(buf, "%u\n", dd->vld[vlattr->vl].mtu);
+}
+
+#define HFI1_VL2MTU_ATTR(N) \
+ static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \
+ .attr = __ATTR(N, 0444, vl2mtu_attr_show, NULL), \
+ .vl = N, \
+ }
+
HFI1_VL2MTU_ATTR(0);
HFI1_VL2MTU_ATTR(1);
HFI1_VL2MTU_ATTR(2);
@@ -446,46 +413,29 @@ HFI1_VL2MTU_ATTR(13);
HFI1_VL2MTU_ATTR(14);
HFI1_VL2MTU_ATTR(15);
-static struct attribute *vl2mtu_default_attributes[] = {
- &hfi1_vl2mtu_attr_0.attr,
- &hfi1_vl2mtu_attr_1.attr,
- &hfi1_vl2mtu_attr_2.attr,
- &hfi1_vl2mtu_attr_3.attr,
- &hfi1_vl2mtu_attr_4.attr,
- &hfi1_vl2mtu_attr_5.attr,
- &hfi1_vl2mtu_attr_6.attr,
- &hfi1_vl2mtu_attr_7.attr,
- &hfi1_vl2mtu_attr_8.attr,
- &hfi1_vl2mtu_attr_9.attr,
- &hfi1_vl2mtu_attr_10.attr,
- &hfi1_vl2mtu_attr_11.attr,
- &hfi1_vl2mtu_attr_12.attr,
- &hfi1_vl2mtu_attr_13.attr,
- &hfi1_vl2mtu_attr_14.attr,
- &hfi1_vl2mtu_attr_15.attr,
+static struct attribute *port_vl2mtu_attributes[] = {
+ &hfi1_vl2mtu_attr_0.attr.attr,
+ &hfi1_vl2mtu_attr_1.attr.attr,
+ &hfi1_vl2mtu_attr_2.attr.attr,
+ &hfi1_vl2mtu_attr_3.attr.attr,
+ &hfi1_vl2mtu_attr_4.attr.attr,
+ &hfi1_vl2mtu_attr_5.attr.attr,
+ &hfi1_vl2mtu_attr_6.attr.attr,
+ &hfi1_vl2mtu_attr_7.attr.attr,
+ &hfi1_vl2mtu_attr_8.attr.attr,
+ &hfi1_vl2mtu_attr_9.attr.attr,
+ &hfi1_vl2mtu_attr_10.attr.attr,
+ &hfi1_vl2mtu_attr_11.attr.attr,
+ &hfi1_vl2mtu_attr_12.attr.attr,
+ &hfi1_vl2mtu_attr_13.attr.attr,
+ &hfi1_vl2mtu_attr_14.attr.attr,
+ &hfi1_vl2mtu_attr_15.attr.attr,
NULL
};
-static ssize_t vl2mtu_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_vl2mtu_attr *vlattr =
- container_of(attr, struct hfi1_vl2mtu_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, vl2mtu_kobj);
- struct hfi1_devdata *dd = ppd->dd;
-
- return sysfs_emit(buf, "%u\n", dd->vld[vlattr->vl].mtu);
-}
-
-static const struct sysfs_ops hfi1_vl2mtu_ops = {
- .show = vl2mtu_attr_show,
-};
-
-static struct kobj_type hfi1_vl2mtu_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_vl2mtu_ops,
- .default_attrs = vl2mtu_default_attributes
+static const struct attribute_group port_vl2mtu_group = {
+ .name = "vl2mtu",
+ .attrs = port_vl2mtu_attributes,
};
/* end of per-port file structures and support code */
@@ -649,101 +599,13 @@ const struct attribute_group ib_hfi1_attr_group = {
.attrs = hfi1_attributes,
};
-int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
- struct kobject *kobj)
-{
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (!port_num || port_num > dd->num_pports) {
- dd_dev_err(dd,
- "Skipping infiniband class with invalid port %u\n",
- port_num);
- return -ENODEV;
- }
- ppd = &dd->pport[port_num - 1];
-
- ret = kobject_init_and_add(&ppd->sc2vl_kobj, &hfi1_sc2vl_ktype, kobj,
- "sc2vl");
- if (ret) {
- dd_dev_err(dd,
- "Skipping sc2vl sysfs info, (err %d) port %u\n",
- ret, port_num);
- /*
- * Based on the documentation for kobject_init_and_add(), the
- * caller should call kobject_put even if this call fails.
- */
- goto bail_sc2vl;
- }
- kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->sl2sc_kobj, &hfi1_sl2sc_ktype, kobj,
- "sl2sc");
- if (ret) {
- dd_dev_err(dd,
- "Skipping sl2sc sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_sl2sc;
- }
- kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->vl2mtu_kobj, &hfi1_vl2mtu_ktype, kobj,
- "vl2mtu");
- if (ret) {
- dd_dev_err(dd,
- "Skipping vl2mtu sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_vl2mtu;
- }
- kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->pport_cc_kobj, &port_cc_ktype,
- kobj, "CCMgtA");
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr);
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc_entry_bin;
- }
-
- dd_dev_info(dd,
- "Congestion Control Agent enabled for port %d\n",
- port_num);
-
- return 0;
-
-bail_cc_entry_bin:
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
-bail_cc:
- kobject_put(&ppd->pport_cc_kobj);
-bail_vl2mtu:
- kobject_put(&ppd->vl2mtu_kobj);
-bail_sl2sc:
- kobject_put(&ppd->sl2sc_kobj);
-bail_sc2vl:
- kobject_put(&ppd->sc2vl_kobj);
- return ret;
-}
+const struct attribute_group *hfi1_attr_port_groups[] = {
+ &port_cc_group,
+ &port_sc2vl_group,
+ &port_sl2sc_group,
+ &port_vl2mtu_group,
+ NULL,
+};
struct sde_attribute {
struct attribute attr;
@@ -868,23 +730,9 @@ bail:
*/
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
{
- struct hfi1_pportdata *ppd;
int i;
/* Unwind operations in hfi1_verbs_register_sysfs() */
for (i = 0; i < dd->num_sdma; i++)
kobject_put(&dd->per_sdma[i].kobj);
-
- for (i = 0; i < dd->num_pports; i++) {
- ppd = &dd->pport[i];
-
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- kobject_put(&ppd->pport_cc_kobj);
- kobject_put(&ppd->vl2mtu_kobj);
- kobject_put(&ppd->sl2sc_kobj);
- kobject_put(&ppd->sc2vl_kobj);
- }
}
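
The net effect of this file's rework: four per-port kobjects and all of their manual kobject_init_and_add()/kobject_put() unwinding collapse into static attribute groups that the core instantiates per port via ib_device_ops.port_groups. A minimal sketch of one read-only port attribute in the new style (names hypothetical; IB_PORT_ATTR_RO() generates the ib_port_attr_<name> variable referenced in the array):

    #include <linux/sysfs.h>
    #include <rdma/ib_sysfs.h>

    static ssize_t demo_state_show(struct ib_device *ibdev, u32 port_num,
                                   struct ib_port_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "port %u ok\n", port_num);
    }
    static IB_PORT_ATTR_RO(demo_state);

    static struct attribute *demo_port_attrs[] = {
            &ib_port_attr_demo_state.attr,
            NULL,
    };

    static const struct attribute_group demo_port_group = {
            .name  = "demo",
            .attrs = demo_port_attrs,
    };

    /* listed in ib_device_ops.port_groups, as hfi1_attr_port_groups is */
    static const struct attribute_group *demo_port_groups[] = {
            &demo_port_group,
            NULL,
    };
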
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 0b1f9e4d038b..233ea48b72c8 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1115,7 +1115,7 @@ static u32 kern_find_pages(struct tid_rdma_flow *flow,
}
flow->length = flow->req->seg_len - length;
- *last = req->isge == ss->num_sge ? false : true;
+ *last = req->isge != ss->num_sge;
return i;
}
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index b219ea90fd6f..715c81308b85 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -189,6 +189,11 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
*qpn = ib_bth_get_qpn(ohdr);
}
+static u16 ib_get_len(const struct ib_header *hdr)
+{
+ return be16_to_cpu(hdr->lrh[2]);
+}
+
void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
u16 *len, u32 *dlid, u32 *slid)
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 554294340caa..9b198c35e1a1 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1693,54 +1693,53 @@ static int init_cntr_names(const char *names_in,
return 0;
}
-static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
+static int init_counters(struct ib_device *ibdev)
{
- int i, err;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int i, err = 0;
mutex_lock(&cntr_names_lock);
- if (!cntr_names_initialized) {
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
-
- err = init_cntr_names(dd->cntrnames,
- dd->cntrnameslen,
- num_driver_cntrs,
- &num_dev_cntrs,
- &dev_cntr_names);
- if (err) {
- mutex_unlock(&cntr_names_lock);
- return NULL;
- }
-
- for (i = 0; i < num_driver_cntrs; i++)
- dev_cntr_names[num_dev_cntrs + i] =
- driver_cntr_names[i];
-
- err = init_cntr_names(dd->portcntrnames,
- dd->portcntrnameslen,
- 0,
- &num_port_cntrs,
- &port_cntr_names);
- if (err) {
- kfree(dev_cntr_names);
- dev_cntr_names = NULL;
- mutex_unlock(&cntr_names_lock);
- return NULL;
- }
- cntr_names_initialized = 1;
+ if (cntr_names_initialized)
+ goto out_unlock;
+
+ err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs,
+ &num_dev_cntrs, &dev_cntr_names);
+ if (err)
+ goto out_unlock;
+
+ for (i = 0; i < num_driver_cntrs; i++)
+ dev_cntr_names[num_dev_cntrs + i] = driver_cntr_names[i];
+
+ err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0,
+ &num_port_cntrs, &port_cntr_names);
+ if (err) {
+ kfree(dev_cntr_names);
+ dev_cntr_names = NULL;
+ goto out_unlock;
}
+ cntr_names_initialized = 1;
+
+out_unlock:
mutex_unlock(&cntr_names_lock);
+ return err;
+}
- if (!port_num)
- return rdma_alloc_hw_stats_struct(
- dev_cntr_names,
- num_dev_cntrs + num_driver_cntrs,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
- else
- return rdma_alloc_hw_stats_struct(
- port_cntr_names,
- num_port_cntrs,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ if (init_counters(ibdev))
+ return NULL;
+ return rdma_alloc_hw_stats_struct(dev_cntr_names,
+ num_dev_cntrs + num_driver_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ if (init_counters(ibdev))
+ return NULL;
+ return rdma_alloc_hw_stats_struct(port_cntr_names, num_port_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static u64 hfi1_sps_ints(void)
@@ -1787,12 +1786,14 @@ static const struct ib_device_ops hfi1_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_HFI1,
- .alloc_hw_stats = alloc_hw_stats,
+ .alloc_hw_device_stats = hfi1_alloc_hw_device_stats,
+ .alloc_hw_port_stats = hfi_alloc_hw_port_stats,
.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
+ .device_group = &ib_hfi1_attr_group,
.get_dev_fw_str = hfi1_get_dev_fw_str,
.get_hw_stats = get_hw_stats,
- .init_port = hfi1_create_port_files,
.modify_device = modify_device,
+ .port_groups = hfi1_attr_port_groups,
/* keep process mad in the driver */
.process_mad = hfi1_process_mad,
.rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
@@ -1927,9 +1928,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
i,
ppd->pkeys);
- rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
- &ib_hfi1_attr_group);
-
ret = rvt_register_device(&dd->verbs_dev.rdi);
if (ret)
goto err_verbs_txreq;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 5d389ed55376..1b02d3bc9bae 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -63,65 +63,14 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
return ret;
}
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
- int rr)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
{
- hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
-}
-
-int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
- int align, unsigned long *obj)
-{
- int ret = 0;
- int i;
-
- if (likely(cnt == 1 && align == 1))
- return hns_roce_bitmap_alloc(bitmap, obj);
-
- spin_lock(&bitmap->lock);
-
- *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
- bitmap->last, cnt, align - 1);
- if (*obj >= bitmap->max) {
- bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
- & bitmap->mask;
- *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
- cnt, align - 1);
- }
-
- if (*obj < bitmap->max) {
- for (i = 0; i < cnt; i++)
- set_bit(*obj + i, bitmap->table);
-
- if (*obj == bitmap->last) {
- bitmap->last = (*obj + cnt);
- if (bitmap->last >= bitmap->max)
- bitmap->last = 0;
- }
- *obj |= bitmap->top;
- } else {
- ret = -EINVAL;
- }
-
- spin_unlock(&bitmap->lock);
-
- return ret;
-}
-
-void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt,
- int rr)
-{
- int i;
-
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
- for (i = 0; i < cnt; i++)
- clear_bit(obj + i, bitmap->table);
+ clear_bit(obj, bitmap->table);
- if (!rr)
- bitmap->last = min(bitmap->last, obj);
+ bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
@@ -208,10 +157,10 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
/* Calc the trunk size and num by required size and page_shift */
if (flags & HNS_ROCE_BUF_DIRECT) {
- buf->trunk_shift = ilog2(ALIGN(size, PAGE_SIZE));
+ buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
ntrunk = 1;
} else {
- buf->trunk_shift = ilog2(ALIGN(page_size, PAGE_SIZE));
+ buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
}
@@ -252,50 +201,41 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
}
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
- int buf_cnt, int start, struct hns_roce_buf *buf)
+ int buf_cnt, struct hns_roce_buf *buf,
+ unsigned int page_shift)
{
- int i, end;
- int total;
-
- end = start + buf_cnt;
- if (end > buf->npages) {
- dev_err(hr_dev->dev,
- "failed to check kmem bufs, end %d + %d total %u!\n",
- start, buf_cnt, buf->npages);
+ unsigned int offset, max_size;
+ int total = 0;
+ int i;
+
+ if (page_shift > buf->trunk_shift) {
+ dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
+ page_shift, buf->trunk_shift);
return -EINVAL;
}
- total = 0;
- for (i = start; i < end; i++)
- bufs[total++] = hns_roce_buf_page(buf, i);
+ offset = 0;
+ max_size = buf->ntrunks << buf->trunk_shift;
+ for (i = 0; i < buf_cnt && offset < max_size; i++) {
+ bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
+ offset += (1 << page_shift);
+ }
return total;
}
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
- int buf_cnt, int start, struct ib_umem *umem,
+ int buf_cnt, struct ib_umem *umem,
unsigned int page_shift)
{
struct ib_block_iter biter;
int total = 0;
- int idx = 0;
- u64 addr;
-
- if (page_shift < HNS_HW_PAGE_SHIFT) {
- dev_err(hr_dev->dev, "failed to check umem page shift %u!\n",
- page_shift);
- return -EINVAL;
- }
/* convert system page cnt to hw page cnt */
rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
- addr = rdma_block_iter_dma_address(&biter);
- if (idx >= start) {
- bufs[total++] = addr;
- if (total >= buf_cnt)
- goto done;
- }
- idx++;
+ bufs[total++] = rdma_block_iter_dma_address(&biter);
+ if (total >= buf_cnt)
+ goto done;
}
done:
@@ -305,13 +245,13 @@ done:
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
- hns_roce_cleanup_xrcd_table(hr_dev);
+ ida_destroy(&hr_dev->xrcd_ida.ida);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_srq_table(hr_dev);
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
- hns_roce_cleanup_mr_table(hr_dev);
- hns_roce_cleanup_pd_table(hr_dev);
+ ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
+ ida_destroy(&hr_dev->pd_ida.ida);
hns_roce_cleanup_uar_table(hr_dev);
}
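
The hand-rolled hns_roce_bitmap allocator gives way to the kernel's IDA for simple ID ranges (PDs, XRCDs, MR keys): ida_alloc_range()/ida_free() replace the lock-protected bitmap walk, and teardown is just the ida_destroy() calls seen above. A sketch of the replacement pattern using the hns_roce_ida wrapper added in hns_roce_device.h below (helper names hypothetical):

    #include <linux/idr.h>

    static void demo_ida_init(struct hns_roce_ida *hr_ida, u32 min, u32 max)
    {
            ida_init(&hr_ida->ida);
            hr_ida->min = min;              /* lowest valid ID */
            hr_ida->max = max;              /* highest valid ID */
    }

    static int demo_id_alloc(struct hns_roce_ida *hr_ida, unsigned long *obj)
    {
            int id = ida_alloc_range(&hr_ida->ida, hr_ida->min, hr_ida->max,
                                     GFP_KERNEL);

            if (id < 0)
                    return id;
            *obj = id;
            return 0;
    }

    static void demo_id_free(struct hns_roce_ida *hr_ida, unsigned long obj)
    {
            ida_free(&hr_ida->ida, obj);
    }
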
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index d5fe56c78394..b73e55de83ac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -77,6 +77,14 @@
#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
+#define _hr_reg_write_bool(ptr, field_type, field_h, field_l, val) \
+ ({ \
+ (val) ? _hr_reg_enable(ptr, field_type, field_h, field_l) : \
+ _hr_reg_clear(ptr, field_type, field_h, field_l); \
+ })
+
+#define hr_reg_write_bool(ptr, field, val) _hr_reg_write_bool(ptr, field, val)
+
#define _hr_reg_write(ptr, field_type, field_h, field_l, val) \
({ \
_hr_reg_clear(ptr, field_type, field_h, field_l); \
@@ -373,8 +381,8 @@
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
-#define ROCEE_TX_CMQ_HEAD_REG 0x07010
-#define ROCEE_TX_CMQ_TAIL_REG 0x07014
+#define ROCEE_TX_CMQ_PI_REG 0x07010
+#define ROCEE_TX_CMQ_CI_REG 0x07014
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
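
hr_reg_write_bool() folds the common set-or-clear-a-bit-from-a-bool pattern into one macro, so callers no longer need an if/else around hr_reg_enable()/hr_reg_clear(). A usage sketch (the QPC_VLAN_EN field name is hypothetical; real field macros expand to the type/high-bit/low-bit triple consumed by the two-level macros above):

            /* before */
            if (vlan_en)
                    hr_reg_enable(qpc, QPC_VLAN_EN);
            else
                    hr_reg_clear(qpc, QPC_VLAN_EN);

            /* after */
            hr_reg_write_bool(qpc, QPC_VLAN_EN, vlan_en);
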
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 800884b074f2..1e9c3c5bee68 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -154,7 +154,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hr_cq->cons_index = 0;
hr_cq->arm_sn = 1;
- atomic_set(&hr_cq->refcount, 1);
+ refcount_set(&hr_cq->refcount, 1);
init_completion(&hr_cq->free);
return 0;
@@ -188,7 +188,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
/* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
+ if (refcount_dec_and_test(&hr_cq->refcount))
complete(&hr_cq->free);
wait_for_completion(&hr_cq->free);
@@ -202,13 +202,13 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct hns_roce_buf_attr buf_attr = {};
int ret;
- buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
buf_attr.region_count = 1;
ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
- hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
udata, addr);
if (ret)
ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
@@ -234,8 +234,7 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
- err = hns_roce_db_map_user(uctx, udata, addr,
- &hr_cq->db);
+ err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
if (err)
return err;
hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
@@ -481,7 +480,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
return;
}
- atomic_inc(&hr_cq->refcount);
+ refcount_inc(&hr_cq->refcount);
ibcq = &hr_cq->ib_cq;
if (ibcq->event_handler) {
@@ -491,7 +490,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
ibcq->event_handler(&event, ibcq->cq_context);
}
- if (atomic_dec_and_test(&hr_cq->refcount))
+ if (refcount_dec_and_test(&hr_cq->refcount))
complete(&hr_cq->free);
}
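
The refcount here pairs with a completion to serialize destroy against in-flight events: creation takes one reference, every event handler bumps and drops it around delivery, and free_cqc() drops the creation reference and then waits until the last holder signals the completion. Condensed from the hunks above:

            /* alloc_cqc(): the CQ owns the initial reference */
            refcount_set(&hr_cq->refcount, 1);
            init_completion(&hr_cq->free);

            /* hns_roce_cq_event(): hold the CQ across the handler */
            refcount_inc(&hr_cq->refcount);
            /* ... invoke ibcq->event_handler() ... */
            if (refcount_dec_and_test(&hr_cq->refcount))
                    complete(&hr_cq->free);

            /* free_cqc(): drop the creation ref, wait for stragglers */
            if (refcount_dec_and_test(&hr_cq->refcount))
                    complete(&hr_cq->free);
            wait_for_completion(&hr_cq->free);
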
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 5cb7376ce978..d40ea3d87260 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -8,8 +8,7 @@
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
-int hns_roce_db_map_user(struct hns_roce_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
struct hns_roce_db *db)
{
unsigned long page_addr = virt & PAGE_MASK;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 97800d2b9d39..991f65269fa6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -47,8 +47,6 @@
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
-#define HNS_ROCE_BA_SIZE (32 * 4096)
-
#define BA_BYTE_LEN 8
/* Hardware specification only for v1 engine */
@@ -97,9 +95,6 @@
#define HNS_ROCE_HOP_NUM_0 0xff
-#define BITMAP_NO_RR 0
-#define BITMAP_RR 1
-
#define MR_TYPE_MR 0x00
#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03
@@ -258,14 +253,18 @@ struct hns_roce_bitmap {
unsigned long *table;
};
+struct hns_roce_ida {
+ struct ida ida;
+ u32 min; /* Lowest ID to allocate. */
+ u32 max; /* Highest ID to allocate. */
+};
+
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
u32 type;
/* HEM array element num */
unsigned long num_hem;
- /* HEM entry record obj total num */
- unsigned long num_obj;
/* Single obj size */
unsigned long obj_size;
unsigned long table_chunk_size;
@@ -338,7 +337,7 @@ struct hns_roce_mw {
struct hns_roce_mr {
struct ib_mr ibmr;
- u64 iova; /* MR's virtual orignal addr */
+ u64 iova; /* MR's virtual original addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
@@ -352,7 +351,7 @@ struct hns_roce_mr {
};
struct hns_roce_mr_table {
- struct hns_roce_bitmap mtpt_bitmap;
+ struct hns_roce_ida mtpt_ida;
struct hns_roce_hem_table mtpt_table;
};
@@ -446,7 +445,7 @@ struct hns_roce_cq {
int cqe_size;
unsigned long cqn;
u32 vector;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
struct list_head sq_list; /* all qps on this send cq */
struct list_head rq_list; /* all qps on this recv cq */
@@ -473,7 +472,7 @@ struct hns_roce_srq {
u32 xrcdn;
void __iomem *db_reg;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
struct hns_roce_mtr buf_mtr;
@@ -555,7 +554,6 @@ struct hns_roce_cmd_context {
struct hns_roce_cmdq {
struct dma_pool *pool;
- struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
* Event mode: cmd register mutex protection,
@@ -642,7 +640,7 @@ struct hns_roce_qp {
u32 xrcdn;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
struct hns_roce_sge sge;
@@ -745,6 +743,7 @@ struct hns_roce_caps {
u32 max_rq_sg;
u32 max_extend_sg;
u32 num_qps;
+ u32 num_pi_qps;
u32 reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
@@ -854,8 +853,7 @@ struct hns_roce_caps {
u32 gmv_buf_pg_sz;
u32 gmv_hop_num;
u32 sl_num;
- u32 tsq_buf_pg_sz;
- u32 tpq_buf_pg_sz;
+ u32 llm_buf_pg_sz;
u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
u16 default_ceq_max_cnt;
@@ -963,8 +961,8 @@ struct hns_roce_dev {
void __iomem *priv_addr;
struct hns_roce_cmdq cmd;
- struct hns_roce_bitmap pd_bitmap;
- struct hns_roce_bitmap xrcd_bitmap;
+ struct hns_roce_ida pd_ida;
+ struct hns_roce_ida xrcd_ida;
struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table;
@@ -1052,7 +1050,7 @@ static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
static inline struct hns_roce_qp
*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
- return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
+ return xa_load(&hr_dev->qp_table_xa, qpn);
}
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
@@ -1062,14 +1060,18 @@ static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
(offset & ((1 << buf->trunk_shift) - 1));
}
-static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
+static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
+ unsigned int offset)
{
- unsigned int offset = idx << buf->page_shift;
-
return buf->trunk_list[offset >> buf->trunk_shift].map +
(offset & ((1 << buf->trunk_shift) - 1));
}
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
+{
+ return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
+}
+
#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
static inline u64 to_hr_hw_page_addr(u64 addr)
@@ -1141,33 +1143,24 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
dma_addr_t *pages, unsigned int page_cnt);
-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
- int rr);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
-int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
- int align, unsigned long *obj);
-void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt,
- int rr);
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
@@ -1206,9 +1199,10 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
u32 page_shift, u32 flags);
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
- int buf_cnt, int start, struct hns_roce_buf *buf);
+ int buf_cnt, struct hns_roce_buf *buf,
+ unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
- int buf_cnt, int start, struct ib_umem *umem,
+ int buf_cnt, struct ib_umem *umem,
unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
@@ -1248,8 +1242,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-int hns_roce_db_map_user(struct hns_roce_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
struct hns_roce_db *db);
@@ -1259,6 +1252,7 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index cfd2e1b60c7f..fa15d79eabb3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,9 +36,6 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define DMA_ADDR_T_SHIFT 12
-#define BT_BA_SHIFT 32
-
#define HEM_INDEX_BUF BIT(0)
#define HEM_INDEX_L0 BIT(1)
#define HEM_INDEX_L1 BIT(2)
@@ -227,8 +224,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
mhop->bt_chunk_size;
- table_idx = (*obj & (table->num_obj - 1)) /
- (chunk_size / table->obj_size);
+ table_idx = *obj / (chunk_size / table->obj_size);
switch (bt_num) {
case 3:
mhop->l2_idx = table_idx & (chunk_ba_num - 1);
@@ -271,7 +267,6 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
if (!hem)
return NULL;
- hem->refcount = 0;
INIT_LIST_HEAD(&hem->chunk_list);
order = get_order(hem_alloc_size);
@@ -338,81 +333,6 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
kfree(hem);
}
-static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, unsigned long obj)
-{
- spinlock_t *lock = &hr_dev->bt_cmd_lock;
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
- void __iomem *bt_cmd;
- __le32 bt_cmd_val[2];
- __le32 bt_cmd_h = 0;
- unsigned long flags;
- __le32 bt_cmd_l;
- int ret = 0;
- u64 bt_ba;
- long end;
-
- /* Find the HEM(Hardware Entry Memory) entry */
- unsigned long i = (obj & (table->num_obj - 1)) /
- (table->table_chunk_size / table->obj_size);
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- case HEM_TYPE_MTPT:
- case HEM_TYPE_CQC:
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- break;
- default:
- return ret;
- }
-
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- /* Currently iter only a chunk */
- for (hns_roce_hem_first(table->hem[i], &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
-
- spin_lock_irqsave(lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = HW_SYNC_TIMEOUT_MSECS;
- while (end > 0) {
- if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
- break;
-
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- if (end <= 0) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(lock, flags);
- return -EBUSY;
- }
-
- bt_cmd_l = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
- bt_ba >> BT_BA_SHIFT);
-
- bt_cmd_val[0] = bt_cmd_l;
- bt_cmd_val[1] = bt_cmd_h;
- hns_roce_write64_k(bt_cmd_val,
- hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(lock, flags);
- }
-
- return ret;
-}
-
static int calc_hem_config(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
struct hns_roce_hem_mhop *mhop,
@@ -618,7 +538,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
mutex_lock(&table->mutex);
if (table->hem[index.buf]) {
- ++table->hem[index.buf]->refcount;
+ refcount_inc(&table->hem[index.buf]->refcount);
goto out;
}
@@ -637,7 +557,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
}
}
- ++table->hem[index.buf]->refcount;
+ refcount_set(&table->hem[index.buf]->refcount, 1);
goto out;
err_alloc:
@@ -657,13 +577,12 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
if (hns_roce_check_whether_mhop(hr_dev, table->type))
return hns_roce_table_mhop_get(hr_dev, table, obj);
- i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
- table->obj_size);
+ i = obj / (table->table_chunk_size / table->obj_size);
mutex_lock(&table->mutex);
if (table->hem[i]) {
- ++table->hem[i]->refcount;
+ refcount_inc(&table->hem[i]->refcount);
goto out;
}
@@ -678,7 +597,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
/* Set HEM base address(128K/page, pa) to Hardware */
- if (hns_roce_set_hem(hr_dev, table, obj)) {
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
hns_roce_free_hem(hr_dev, table->hem[i]);
table->hem[i] = NULL;
ret = -ENODEV;
@@ -686,7 +605,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
goto out;
}
- ++table->hem[i]->refcount;
+ refcount_set(&table->hem[i]->refcount, 1);
out:
mutex_unlock(&table->mutex);
return ret;
@@ -753,11 +672,11 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
return;
}
- mutex_lock(&table->mutex);
- if (check_refcount && (--table->hem[index.buf]->refcount > 0)) {
- mutex_unlock(&table->mutex);
+ if (!check_refcount)
+ mutex_lock(&table->mutex);
+ else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
+ &table->mutex))
return;
- }
clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
free_mhop_hem(hr_dev, table, &mhop, &index);
@@ -776,19 +695,17 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
return;
}
- i = (obj & (table->num_obj - 1)) /
- (table->table_chunk_size / table->obj_size);
+ i = obj / (table->table_chunk_size / table->obj_size);
- mutex_lock(&table->mutex);
+ if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
+ &table->mutex))
+ return;
- if (--table->hem[i]->refcount == 0) {
- /* Clear HEM base address */
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT))
+ dev_warn(dev, "failed to clear HEM base address.\n");
- hns_roce_free_hem(hr_dev, table->hem[i]);
- table->hem[i] = NULL;
- }
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
mutex_unlock(&table->mutex);
}
@@ -816,8 +733,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
obj_per_chunk = table->table_chunk_size / table->obj_size;
- hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
- idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
+ hem = table->hem[obj / obj_per_chunk];
+ idx_offset = obj % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
@@ -834,8 +751,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
- dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
- mhop.bt_chunk_size;
+ dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
@@ -877,7 +793,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!hns_roce_check_whether_mhop(hr_dev, type)) {
table->table_chunk_size = hr_dev->caps.chunk_sz;
obj_per_chunk = table->table_chunk_size / obj_size;
- num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
if (!table->hem)
@@ -899,8 +815,9 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
hop_num = mhop.hop_num;
obj_per_chunk = buf_chunk_size / obj_size;
- num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
+
if (type >= HEM_TYPE_MTT)
num_bt_l0 = bt_chunk_num;
@@ -912,8 +829,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (check_whether_bt_num_3(type, hop_num)) {
unsigned long num_bt_l1;
- num_bt_l1 = (num_hem + bt_chunk_num - 1) /
- bt_chunk_num;
+ num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
table->bt_l1 = kcalloc(num_bt_l1,
sizeof(*table->bt_l1),
GFP_KERNEL);
@@ -945,7 +861,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
table->type = type;
table->num_hem = num_hem;
- table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
mutex_init(&table->mutex);
@@ -1053,7 +968,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
}
-struct roce_hem_item {
+struct hns_roce_hem_item {
struct list_head list; /* link all hems in the same bt level */
struct list_head sibling; /* link all hems in last hop for mtt */
void *addr;
@@ -1063,12 +978,18 @@ struct roce_hem_item {
int end; /* end buf offset in this hem */
};
-static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
- int start, int end,
- int count, bool exist_bt,
- int bt_level)
+/* All HEM items are linked in a tree structure */
+struct hns_roce_hem_head {
+ struct list_head branch[HNS_ROCE_MAX_BT_REGION];
+ struct list_head root;
+ struct list_head leaf;
+};
+
+static struct hns_roce_hem_item *
+hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ bool exist_bt, int bt_level)
{
- struct roce_hem_item *hem;
+ struct hns_roce_hem_item *hem;
hem = kzalloc(sizeof(*hem), GFP_KERNEL);
if (!hem)
@@ -1093,7 +1014,7 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
- struct roce_hem_item *hem, bool exist_bt)
+ struct hns_roce_hem_item *hem, bool exist_bt)
{
if (exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
@@ -1104,7 +1025,7 @@ static void hem_list_free_item(struct hns_roce_dev *hr_dev,
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
struct list_head *head, bool exist_bt)
{
- struct roce_hem_item *hem, *temp_hem;
+ struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
@@ -1120,24 +1041,24 @@ static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
- struct roce_hem_item *hem, void *cpu_addr,
+ struct hns_roce_hem_item *hem, void *cpu_addr,
u64 phy_addr)
{
hem->addr = cpu_addr;
hem->dma_addr = (dma_addr_t)phy_addr;
}
-static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem,
+static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
int offset)
{
return (hem->start <= offset && offset <= hem->end);
}
-static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
- int page_offset)
+static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
+ int page_offset)
{
- struct roce_hem_item *hem, *temp_hem;
- struct roce_hem_item *found = NULL;
+ struct hns_roce_hem_item *hem, *temp_hem;
+ struct hns_roce_hem_item *found = NULL;
list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
if (hem_list_page_is_in_range(hem, page_offset)) {
@@ -1161,7 +1082,7 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
return bt_level >= (hopnum ? hopnum - 1 : hopnum);
}
-/**
+/*
* calculate the number of base address entries
* @hopnum: number of multihop addressing levels
* @bt_level: base address table level
@@ -1194,7 +1115,7 @@ static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
return step;
}
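
One assumed reading of hem_list_calc_ba_range(), based on the comment above rather than code shown in this diff: an entry at the given BT level covers unit^(hopnum - bt_level - 1) bottom-level entries. A hedged standalone model of that arithmetic:

#include <linux/types.h>

/* Assumed model: with hopnum = 2, bt_level = 0 and unit = 256,
 * one L0 entry covers 256 bottom entries (step = 256).
 */
static u32 calc_ba_range_model(int hopnum, int bt_level, int unit)
{
	u32 step = 1;
	int i;

	if (hopnum <= bt_level)
		return 0;

	for (i = 1; i < hopnum - bt_level; i++)
		step *= unit;

	return step;
}
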
-/**
+/*
* calculate the root BA entries that can cover all regions
* @regions: buf region array
* @region_cnt: array size of @regions
@@ -1227,9 +1148,9 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
int offset, struct list_head *mid_bt,
struct list_head *btm_bt)
{
- struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
+ struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
- struct roce_hem_item *cur, *pre;
+ struct hns_roce_hem_item *cur, *pre;
const int hopnum = r->hopnum;
int start_aligned;
int distance;
@@ -1307,56 +1228,96 @@ err_exit:
return ret;
}
-static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_list *hem_list, int unit,
- const struct hns_roce_buf_region *regions,
- int region_cnt)
+static struct hns_roce_hem_item *
+alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
+ const struct hns_roce_buf_region *regions, int region_cnt)
{
- struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
- struct roce_hem_item *hem, *temp_hem, *root_hem;
const struct hns_roce_buf_region *r;
- struct list_head temp_root;
- struct list_head temp_btm;
- void *cpu_base;
- u64 phy_base;
- int ret = 0;
+ struct hns_roce_hem_item *hem;
int ba_num;
int offset;
- int total;
- int step;
- int i;
-
- r = &regions[0];
- root_hem = hem_list_search_item(&hem_list->root_bt, r->offset);
- if (root_hem)
- return 0;
ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
if (ba_num < 1)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (ba_num > unit)
- return -ENOBUFS;
+ return ERR_PTR(-ENOBUFS);
- ba_num = min_t(int, ba_num, unit);
- INIT_LIST_HEAD(&temp_root);
- offset = r->offset;
+ offset = regions[0].offset;
/* point to the last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
- ba_num, true, 0);
- if (!root_hem)
+ hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
+ if (!hem)
+ return ERR_PTR(-ENOMEM);
+
+ *max_ba_num = ba_num;
+
+ return hem;
+}
+
+static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ u64 phy_base, const struct hns_roce_buf_region *r,
+ struct list_head *branch_head,
+ struct list_head *leaf_head)
+{
+ struct hns_roce_hem_item *hem;
+
+ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+ r->count, false, 0);
+ if (!hem)
return -ENOMEM;
- list_add(&root_hem->list, &temp_root);
- hem_list->root_ba = root_hem->dma_addr;
+ hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
+ list_add(&hem->list, branch_head);
+ list_add(&hem->sibling, leaf_head);
- INIT_LIST_HEAD(&temp_btm);
- for (i = 0; i < region_cnt; i++)
- INIT_LIST_HEAD(&temp_list[i]);
+ return r->count;
+}
+
+static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ int unit, const struct hns_roce_buf_region *r,
+ const struct list_head *branch_head)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+ int total = 0;
+ int offset;
+ int step;
+
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step < 1)
+ return -EINVAL;
+
+ /* if exist mid bt, link L1 to L0 */
+ list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
+ offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
+ hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
+ total++;
+ }
+
+ return total;
+}
+
+static int
+setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ int unit, int max_ba_num, struct hns_roce_hem_head *head,
+ const struct hns_roce_buf_region *regions, int region_cnt)
+{
+ const struct hns_roce_buf_region *r;
+ struct hns_roce_hem_item *root_hem;
+ void *cpu_base;
+ u64 phy_base;
+ int i, total;
+ int ret;
+
+ root_hem = list_first_entry(&head->root,
+ struct hns_roce_hem_item, list);
+ if (!root_hem)
+ return -ENOMEM;
total = 0;
- for (i = 0; i < region_cnt && total < ba_num; i++) {
+ for (i = 0; i < region_cnt && total < max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1368,48 +1329,64 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
/* if hopnum is 0 or 1, cut a new fake hem from the root bt,
 * whose address is shared with all regions.
*/
- if (hem_list_is_bottom_bt(r->hopnum, 0)) {
- hem = hem_list_alloc_item(hr_dev, r->offset,
- r->offset + r->count - 1,
- r->count, false, 0);
- if (!hem) {
- ret = -ENOMEM;
- goto err_exit;
- }
- hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
- list_add(&hem->list, &temp_list[i]);
- list_add(&hem->sibling, &temp_btm);
- total += r->count;
- } else {
- step = hem_list_calc_ba_range(r->hopnum, 1, unit);
- if (step < 1) {
- ret = -EINVAL;
- goto err_exit;
- }
- /* if exist mid bt, link L1 to L0 */
- list_for_each_entry_safe(hem, temp_hem,
- &hem_list->mid_bt[i][1], list) {
- offset = (hem->start - r->offset) / step *
- BA_BYTE_LEN;
- hem_list_link_bt(hr_dev, cpu_base + offset,
- hem->dma_addr);
- total++;
- }
- }
+ if (hem_list_is_bottom_bt(r->hopnum, 0))
+ ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
+ &head->branch[i], &head->leaf);
+ else
+ ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
+ &hem_list->mid_bt[i][1]);
+
+ if (ret < 0)
+ return ret;
+
+ total += ret;
}
- list_splice(&temp_btm, &hem_list->btm_bt);
- list_splice(&temp_root, &hem_list->root_bt);
+ list_splice(&head->leaf, &hem_list->btm_bt);
+ list_splice(&head->root, &hem_list->root_bt);
for (i = 0; i < region_cnt; i++)
- list_splice(&temp_list[i], &hem_list->mid_bt[i][0]);
+ list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
return 0;
+}
-err_exit:
+static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list, int unit,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt)
+{
+ struct hns_roce_hem_item *root_hem;
+ struct hns_roce_hem_head head;
+ int max_ba_num;
+ int ret;
+ int i;
+
+ root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
+ if (root_hem)
+ return 0;
+
+ max_ba_num = 0;
+ root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
+ region_cnt);
+ if (IS_ERR(root_hem))
+ return PTR_ERR(root_hem);
+
+ /* List head for storing all allocated HEM items */
+ INIT_LIST_HEAD(&head.root);
+ INIT_LIST_HEAD(&head.leaf);
for (i = 0; i < region_cnt; i++)
- hem_list_free_all(hr_dev, &temp_list[i], false);
+ INIT_LIST_HEAD(&head.branch[i]);
- hem_list_free_all(hr_dev, &temp_root, true);
+ hem_list->root_ba = root_hem->dma_addr;
+ list_add(&root_hem->list, &head.root);
+ ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
+ region_cnt);
+ if (ret) {
+ for (i = 0; i < region_cnt; i++)
+ hem_list_free_all(hr_dev, &head.branch[i], false);
+
+ hem_list_free_all(hr_dev, &head.root, true);
+ }
return ret;
}
@@ -1495,7 +1472,7 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
int offset, int *mtt_cnt, u64 *phy_addr)
{
struct list_head *head = &hem_list->btm_bt;
- struct roce_hem_item *hem, *temp_hem;
+ struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
u64 phy_base = 0;
int nr = 0;
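
alloc_root_hem() above changes from returning an int error code to returning the pointer itself with the error encoded via ERR_PTR(). A minimal sketch of that standard <linux/err.h> pattern; struct foo and the function names are illustrative:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static struct foo *alloc_foo(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return f;
}

static int use_foo(void)
{
	struct foo *f = alloc_foo();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode the errno back out */
	kfree(f);
	return 0;
}
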
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 13fdeb3274e7..2d84a6b3f05d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,9 +34,7 @@
#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL)
-#define BT_CMD_SYNC_SHIFT 31
+#define HEM_HOP_STEP_DIRECT 0xff
enum {
/* MAP HEM(Hardware Entry Memory) */
@@ -74,11 +72,6 @@ enum {
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
-enum {
- HNS_ROCE_HEM_PAGE_SHIFT = 12,
- HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
-};
-
struct hns_roce_hem_chunk {
struct list_head list;
int npages;
@@ -88,8 +81,8 @@ struct hns_roce_hem_chunk {
};
struct hns_roce_hem {
- struct list_head chunk_list;
- int refcount;
+ struct list_head chunk_list;
+ refcount_t refcount;
};
struct hns_roce_hem_iter {
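
The hunk above is part of the tree-wide refcount_t conversion mentioned in the merge description: a plain int counter becomes a refcount_t, which saturates instead of overflowing and warns on misuse. A generic sketch of the <linux/refcount.h> API; the surrounding struct is illustrative:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcount;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->refcount, 1);	/* initial reference */
	return o;
}

static struct obj *obj_get(struct obj *o)
{
	refcount_inc(&o->refcount);	/* saturates rather than wrapping */
	return o;
}

static void obj_put(struct obj *o)
{
	/* returns true only when the last reference is dropped */
	if (refcount_dec_and_test(&o->refcount))
		kfree(o);
}
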
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 620acf66b22c..a3305d196675 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -462,6 +462,81 @@ static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
+static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
+{
+ spinlock_t *lock = &hr_dev->bt_cmd_lock;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_iter iter;
+ void __iomem *bt_cmd;
+ __le32 bt_cmd_val[2];
+ __le32 bt_cmd_h = 0;
+ unsigned long flags;
+ __le32 bt_cmd_l;
+ int ret = 0;
+ u64 bt_ba;
+ long end;
+
+ /* Find the HEM(Hardware Entry Memory) entry */
+ unsigned long i = obj / (table->table_chunk_size / table->obj_size);
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ case HEM_TYPE_MTPT:
+ case HEM_TYPE_CQC:
+ case HEM_TYPE_SRQC:
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
+ break;
+ default:
+ return ret;
+ }
+
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ /* Currently only iterate over a single chunk */
+ for (hns_roce_hem_first(table->hem[i], &iter);
+ !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+ bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT;
+
+ spin_lock_irqsave(lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = HW_SYNC_TIMEOUT_MSECS;
+ while (end > 0) {
+ if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
+ break;
+
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
+ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
+ }
+
+ if (end <= 0) {
+ dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
+ spin_unlock_irqrestore(lock, flags);
+ return -EBUSY;
+ }
+
+ bt_cmd_l = cpu_to_le32(bt_ba);
+ roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
+ upper_32_bits(bt_ba));
+
+ bt_cmd_val[0] = bt_cmd_l;
+ bt_cmd_val[1] = bt_cmd_h;
+ hns_roce_write64_k(bt_cmd_val,
+ hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+ spin_unlock_irqrestore(lock, flags);
+ }
+
+ return ret;
+}
+
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
u32 odb_mode)
{
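
The open-coded mdelay() poll in hns_roce_v1_set_hem() above runs under a spinlock with interrupts off. The same wait could be expressed with the iopoll helpers; a hedged alternative sketch using readl_poll_timeout_atomic() from <linux/iopoll.h> (the macros come from the driver header above, the unit conversion is illustrative, and this is a style option rather than something this patch does):

#include <linux/iopoll.h>

static int wait_bt_cmd_idle(void __iomem *bt_cmd)
{
	u32 val;

	/* busy-poll (no sleeping) until the HW_SYNC bit clears,
	 * checking every interval up to the overall timeout
	 */
	return readl_poll_timeout_atomic(bt_cmd, val,
					 !(val >> BT_CMD_SYNC_SHIFT),
					 HW_SYNC_SLEEP_TIME_INTERVAL * 1000,
					 HW_SYNC_TIMEOUT_MSECS * 1000);
}
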
@@ -1123,8 +1198,7 @@ free_mr:
dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key), 0);
+ ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key));
hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
kfree(mr);
@@ -4352,6 +4426,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .set_hem = hns_roce_v1_set_hem,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.dereg_mr = hns_roce_v1_dereg_mr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 84383236e47d..60fdcbae6729 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -1085,6 +1085,11 @@ struct hns_roce_db_table {
struct hns_roce_ext_db *ext_db;
};
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL)
+#define BT_CMD_SYNC_SHIFT 31
+#define HNS_ROCE_BA_SIZE (32 * 4096)
+
struct hns_roce_bt_table {
struct hns_roce_buf_list qpc_buf;
struct hns_roce_buf_list mtpt_buf;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 7652dafe32ec..594d4cef31b3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -105,16 +105,12 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
u64 pbl_ba;
/* use ib_access_flags */
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S,
- !!(wr->access & IB_ACCESS_MW_BIND));
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S,
- !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RR_S,
- !!(wr->access & IB_ACCESS_REMOTE_READ));
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RW_S,
- !!(wr->access & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_LW_S,
- !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
+ hr_reg_write_bool(fseg, FRMR_ATOMIC,
+ wr->access & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
+ hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
/* Data structure reuse may lead to confusion */
pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
@@ -126,11 +122,10 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
- fseg->pbl_size = cpu_to_le32(mr->npages);
- roce_set_field(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
- V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
- roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+ hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
+ hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(fseg, FRMR_BLK_MODE);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
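
The hunk above migrates from the roce_set_bit()/roce_set_field() pair to the newer hr_reg_*() manipulators. A simplified, assumed model of what a boolean field write does; the real macros in the driver resolve mask and position from the FRMR_* field token, whereas this standalone version takes them explicitly:

#include <linux/types.h>

/* Assumed simplification of hr_reg_write_bool(): set or clear one
 * flag bit inside a little-endian 32-bit register word.
 */
static void reg_write_bool_model(__le32 *word, u32 mask, bool val)
{
	u32 v = le32_to_cpu(*word);

	v &= ~mask;
	if (val)
		v |= mask;
	*word = cpu_to_le32(v);
}
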
@@ -274,8 +269,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
-
if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
roce_set_bit(rc_sq_wqe->byte_20,
V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
@@ -320,6 +313,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+ !!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
@@ -629,32 +624,15 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
- /*
- * Hip08 hardware cannot flush the WQEs in SQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
if (unlikely(qp->state == IB_QPS_ERR)) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
} else {
struct hns_roce_v2_db sq_db = {};
- roce_set_field(sq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
- qp->doorbell_qpn);
- roce_set_field(sq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
- HNS_ROCE_V2_SQ_DB);
-
- /* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
- roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
- roce_set_field(sq_db.parameter, V2_DB_PRODUCER_IDX_M,
- V2_DB_PRODUCER_IDX_S, qp->sq.head);
- roce_set_field(sq_db.parameter, V2_DB_SL_M, V2_DB_SL_S,
- qp->sl);
+ hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
+ hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
+ hr_reg_write(&sq_db, DB_PI, qp->sq.head);
+ hr_reg_write(&sq_db, DB_SL, qp->sl);
hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
}
@@ -663,18 +641,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
- /*
- * Hip08 hardware cannot flush the WQEs in RQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
if (unlikely(qp->state == IB_QPS_ERR)) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
} else {
if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
*qp->rdb.db_record =
@@ -682,12 +650,9 @@ static inline void update_rq_db(struct hns_roce_dev *hr_dev,
} else {
struct hns_roce_v2_db rq_db = {};
- roce_set_field(rq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
- qp->qpn);
- roce_set_field(rq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
- HNS_ROCE_V2_RQ_DB);
- roce_set_field(rq_db.parameter, V2_DB_PRODUCER_IDX_M,
- V2_DB_PRODUCER_IDX_S, qp->rq.head);
+ hr_reg_write(&rq_db, DB_TAG, qp->qpn);
+ hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
+ hr_reg_write(&rq_db, DB_PI, qp->rq.head);
hns_roce_write64(hr_dev, (__le32 *)&rq_db,
qp->rq.db_reg);
@@ -775,10 +740,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
/* Process the WQE according to the QP type */
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- else if (ibqp->qp_type == IB_QPT_RC)
+ if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ else
+ ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
if (unlikely(ret)) {
*bad_wr = wr;
@@ -791,8 +756,7 @@ out:
qp->sq.head += nreq;
qp->next_sge = sge_idx;
- if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
- (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
write_dwqe(hr_dev, qp, wqe);
else
update_sq_db(hr_dev, qp);
@@ -1005,6 +969,13 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
idx_que->head++;
}
+static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
+{
+ hr_reg_write(db, DB_TAG, srq->srqn);
+ hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
+ hr_reg_write(db, DB_PI, srq->idx_que.head);
+}
+
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
@@ -1042,12 +1013,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (likely(nreq)) {
- roce_set_field(srq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
- srq->srqn);
- roce_set_field(srq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
- HNS_ROCE_V2_SRQ_DB);
- roce_set_field(srq_db.parameter, V2_DB_PRODUCER_IDX_M,
- V2_DB_PRODUCER_IDX_S, srq->idx_que.head);
+ update_srq_db(&srq_db, srq);
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
}
@@ -1210,8 +1176,6 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
kfree(ring->desc);
ring->desc = NULL;
- dev_err_ratelimited(hr_dev->dev,
- "failed to map cmq desc addr.\n");
return -ENOMEM;
}
@@ -1229,44 +1193,32 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
kfree(ring->desc);
}
-static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
+static int init_csq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *csq)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
- &priv->cmq.csq : &priv->cmq.crq;
+ dma_addr_t dma;
+ int ret;
- ring->flag = ring_type;
- ring->head = 0;
+ csq->desc_num = CMD_CSQ_DESC_NUM;
+ spin_lock_init(&csq->lock);
+ csq->flag = TYPE_CSQ;
+ csq->head = 0;
- return hns_roce_alloc_cmq_desc(hr_dev, ring);
-}
+ ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
+ if (ret)
+ return ret;
-static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
-{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
- &priv->cmq.csq : &priv->cmq.crq;
- dma_addr_t dma = ring->desc_dma_addr;
-
- if (ring_type == TYPE_CSQ) {
- roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
- roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
- (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
-
- /* Make sure to write tail first and then head */
- roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
- roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
- } else {
- roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
- roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
- (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
- roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
- roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
- }
+ dma = csq->desc_dma_addr;
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+ (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+
+ /* Make sure to write CI first and then PI */
+ roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
+ roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
+
+ return 0;
}
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
@@ -1274,43 +1226,11 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
- /* Setup the queue entries for command queue */
- priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
- priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
-
- /* Setup the lock for command queue */
- spin_lock_init(&priv->cmq.csq.lock);
- spin_lock_init(&priv->cmq.crq.lock);
-
- /* Setup Tx write back timeout */
priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
- /* Init CSQ */
- ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
- if (ret) {
- dev_err_ratelimited(hr_dev->dev,
- "failed to init CSQ, ret = %d.\n", ret);
- return ret;
- }
-
- /* Init CRQ */
- ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
- if (ret) {
- dev_err_ratelimited(hr_dev->dev,
- "failed to init CRQ, ret = %d.\n", ret);
- goto err_crq;
- }
-
- /* Init CSQ REG */
- hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
-
- /* Init CRQ REG */
- hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
-
- return 0;
-
-err_crq:
- hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+ ret = init_csq(hr_dev, &priv->cmq.csq);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
return ret;
}
@@ -1320,7 +1240,6 @@ static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
struct hns_roce_v2_priv *priv = hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
- hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
@@ -1339,7 +1258,7 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
+ u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
struct hns_roce_v2_priv *priv = hr_dev->priv;
return tail == priv->cmq.csq.head;
@@ -1367,7 +1286,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
}
/* Write to hardware */
- roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, csq->head);
+ roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
/* If the command is sync, wait for the firmware to write back,
* if multiple descriptors are sent, use the first one to check
@@ -1398,7 +1317,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
}
} else {
/* FW/HW reset or incorrect number of desc */
- tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
+ tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
csq->head, tail);
csq->head = tail;
@@ -1620,6 +1539,22 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
}
}
+static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
+ false);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to clear extended doorbell info, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_fw_info *resp;
@@ -1723,17 +1658,30 @@ static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
return 0;
}
-static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
- return load_func_res_caps(hr_dev, false);
-}
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ u32 func_num, qp_num;
+ int ret;
-static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
-{
- return load_func_res_caps(hr_dev, true);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
+ qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
+ caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
+
+ qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
+ caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
+
+ return 0;
}
-static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
+static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
@@ -1753,6 +1701,50 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
return 0;
}
+static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = load_func_res_caps(hr_dev, is_vf);
+ if (ret) {
+ dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
+ is_vf ? "vf" : "pf");
+ return ret;
+ }
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ ret = load_ext_cfg_caps(hr_dev, is_vf);
+ if (ret)
+ dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
+ ret, is_vf ? "vf" : "pf");
+ }
+
+ return ret;
+}
+
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ ret = query_func_resource_caps(hr_dev, false);
+ if (ret)
+ return ret;
+
+ ret = load_pf_timer_res_caps(hr_dev);
+ if (ret)
+ dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ return query_func_resource_caps(hr_dev, true);
+}
+
static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
u32 vf_id)
{
@@ -1792,7 +1784,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
return 0;
}
-static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
@@ -1837,15 +1829,48 @@ static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
return hns_roce_cmq_send(hr_dev, desc, 2);
}
+static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
+
+ hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
+
+ hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
+ hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
+ hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
+ hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
- int vf_id;
+ u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ u32 vf_id;
int ret;
- for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
- ret = __hns_roce_alloc_vf_resource(hr_dev, vf_id);
- if (ret)
+ for (vf_id = 0; vf_id < func_num; vf_id++) {
+ ret = config_vf_hem_resource(hr_dev, vf_id);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to config vf-%u hem res, ret = %d.\n",
+ vf_id, ret);
return ret;
+ }
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ ret = config_vf_ext_resource(hr_dev, vf_id);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to config vf-%u ext res, ret = %d.\n",
+ vf_id, ret);
+ return ret;
+ }
+ }
}
return 0;
@@ -1897,9 +1922,9 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
+/* Use default caps when hns_roce_query_pf_caps() fails or when initializing a VF profile */
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_caps *caps = &hr_dev->caps;
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
@@ -1911,19 +1936,18 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
- caps->num_comp_vectors =
- min_t(u32, caps->eqc_bt_num - 1,
- (u32)priv->handle->rinfo.num_vectors - 2);
caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
+ caps->num_comp_vectors = 0;
+
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
- caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
+ caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
+
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
@@ -1934,12 +1958,10 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
- caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
- caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
@@ -1950,15 +1972,15 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
- caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
@@ -1979,36 +2001,17 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
- caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
- caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
- caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
- caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
-
- caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
- caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
- caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
- caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
- caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
- caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
- caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
- caps->gmv_entry_sz);
- caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
- caps->gmv_entry_sz);
- caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
+ caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
} else {
- caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
- caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+
+ /* The following configurations are only valid for HIP08 */
caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
- caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
- caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+ caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
}
}
@@ -2063,9 +2066,11 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
caps->eqe_buf_pg_sz = 0;
/* Link Table */
- caps->tsq_buf_pg_sz = 0;
+ caps->llm_buf_pg_sz = 0;
/* MR */
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
caps->pbl_buf_pg_sz = 0;
calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
@@ -2073,8 +2078,12 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
HEM_TYPE_MTPT);
/* QP */
- caps->qpc_timer_ba_pg_sz = 0;
+ caps->qpc_ba_pg_sz = 0;
+ caps->qpc_buf_pg_sz = 0;
+ caps->qpc_timer_ba_pg_sz = 0;
caps->qpc_timer_buf_pg_sz = 0;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
caps->mtt_ba_pg_sz = 0;
caps->mtt_buf_pg_sz = 0;
calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
@@ -2087,20 +2096,26 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
&caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
/* CQ */
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_timer_ba_pg_sz = 0;
+ caps->cqc_timer_buf_pg_sz = 0;
+ caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
+ caps->cqe_buf_pg_sz = 0;
calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
HEM_TYPE_CQC);
calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
- if (caps->cqc_timer_entry_sz)
- calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
- caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
- &caps->cqc_timer_buf_pg_sz,
- &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
-
/* SRQ */
if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+ caps->srqwqe_ba_pg_sz = 0;
+ caps->srqwqe_buf_pg_sz = 0;
+ caps->idx_ba_pg_sz = 0;
+ caps->idx_buf_pg_sz = 0;
calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
caps->srqc_hop_num, caps->srqc_bt_num,
&caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
@@ -2118,6 +2133,71 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
caps->gmv_buf_pg_sz = 0;
}
+/* Apply all loaded caps before setting to hardware */
+static void apply_func_caps(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ /* The following configurations don't need to be obtained from firmware. */
+ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+ caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+
+ caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
+ caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
+
+ caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+ caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
+
+ if (!caps->num_comp_vectors)
+ caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
+ (u32)priv->handle->rinfo.num_vectors - 2);
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+
+ /* The following configurations will be overwritten */
+ caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+ caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+ caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
+
+ /* The following configurations are not obtained from firmware */
+ caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
+
+ caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->gid_table_len[0] = caps->gmv_bt_num *
+ (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
+
+ caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+ caps->gmv_entry_sz);
+ } else {
+ u32 func_num = max_t(u32, 1, hr_dev->func_num);
+
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+ caps->gid_table_len[0] /= func_num;
+ }
+
+ if (hr_dev->is_vf) {
+ caps->default_aeq_arm_st = 0x3;
+ caps->default_ceq_arm_st = 0x3;
+ caps->default_ceq_max_cnt = 0x1;
+ caps->default_ceq_period = 0x10;
+ caps->default_aeq_max_cnt = 0x1;
+ caps->default_aeq_period = 0x10;
+ }
+
+ set_hem_page_size(hr_dev);
+}
+
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
@@ -2167,7 +2247,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
- caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+ caps->cqe_sz = resp_a->cqe_sz;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
caps->irrl_entry_sz = resp_b->irrl_entry_sz;
@@ -2177,7 +2257,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->idx_entry_sz = resp_b->idx_entry_sz;
caps->sccc_sz = resp_b->sccc_sz;
caps->max_mtu = resp_b->max_mtu;
- caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
+ caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
caps->min_cqes = resp_b->min_cqes;
caps->min_wqes = resp_b->min_wqes;
caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
@@ -2202,8 +2282,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
V2_QUERY_PF_CAPS_C_MAX_GID_M,
V2_QUERY_PF_CAPS_C_MAX_GID_S);
- caps->gid_table_len[0] /= hr_dev->func_num;
-
caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
@@ -2274,18 +2352,8 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
- caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
- caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
- caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
- caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
- caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
- caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
- caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
-
caps->qpc_hop_num = ctx_hop_num;
+ caps->sccc_hop_num = ctx_hop_num;
caps->srqc_hop_num = ctx_hop_num;
caps->cqc_hop_num = ctx_hop_num;
caps->mpt_hop_num = ctx_hop_num;
@@ -2303,23 +2371,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
- caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
- caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
- caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
- caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
- caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
- caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
- caps->gmv_entry_sz);
- caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gid_table_len[0] = caps->gmv_bt_num *
- (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
- }
-
- caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
-
return 0;
}
@@ -2362,285 +2413,235 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
{
+ struct device *dev = hr_dev->dev;
int ret;
- hr_dev->vendor_part_id = hr_dev->pci_dev->device;
- hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
hr_dev->func_num = 1;
+ set_default_caps(hr_dev);
+
ret = hns_roce_query_vf_resource(hr_dev);
if (ret) {
- dev_err(hr_dev->dev,
- "Query the VF resource fail, ret = %d.\n", ret);
+ dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
return ret;
}
- set_default_caps(hr_dev);
- set_hem_page_size(hr_dev);
+ apply_func_caps(hr_dev);
ret = hns_roce_v2_set_bt(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev,
- "Configure the VF bt attribute fail, ret = %d.\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
- return 0;
+ return ret;
}
-static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_caps *caps = &hr_dev->caps;
+ struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_cmq_query_hw_info(hr_dev);
+ ret = hns_roce_query_func_info(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to query func info, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_query_fw_ver(hr_dev);
+ ret = hns_roce_config_global_param(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to config global param, ret = %d.\n", ret);
return ret;
}
- if (hr_dev->is_vf)
- return hns_roce_v2_vf_profile(hr_dev);
-
- ret = hns_roce_query_func_info(hr_dev);
+ ret = hns_roce_set_vf_switch_param(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "Query function info fail, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_config_global_param(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
- ret);
- return ret;
- }
+ ret = hns_roce_query_pf_caps(hr_dev);
+ if (ret)
+ set_default_caps(hr_dev);
- /* Get pf resource owned by every pf */
ret = hns_roce_query_pf_resource(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_query_pf_timer_resource(hr_dev);
+ apply_func_caps(hr_dev);
+
+ ret = hns_roce_alloc_vf_resource(hr_dev);
if (ret) {
- dev_err(hr_dev->dev,
- "failed to query pf timer resource, ret = %d.\n", ret);
+ dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_set_vf_switch_param(hr_dev);
+ ret = hns_roce_v2_set_bt(hr_dev);
if (ret) {
- dev_err(hr_dev->dev,
- "failed to set function switch param, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
return ret;
}
- hr_dev->vendor_part_id = hr_dev->pci_dev->device;
- hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
-
- caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ /* Configure the size of QPC, SCCC, etc. */
+ return hns_roce_config_entry_size(hr_dev);
+}
- ret = hns_roce_query_pf_caps(hr_dev);
- if (ret)
- set_default_caps(hr_dev);
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
- ret = hns_roce_alloc_vf_resource(hr_dev);
+ ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
return ret;
}
- set_hem_page_size(hr_dev);
- ret = hns_roce_v2_set_bt(hr_dev);
+ ret = hns_roce_query_fw_ver(hr_dev);
if (ret) {
- dev_err(hr_dev->dev,
- "Configure bt attribute fail, ret = %d.\n", ret);
+ dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
return ret;
}
- /* Configure the size of QPC, SCCC, etc. */
- ret = hns_roce_config_entry_size(hr_dev);
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- return ret;
+ if (hr_dev->is_vf)
+ return hns_roce_v2_vf_profile(hr_dev);
+ else
+ return hns_roce_v2_pf_profile(hr_dev);
}
-static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
- enum hns_roce_link_table_type type)
+static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
{
- struct hns_roce_cmq_desc desc[2];
- struct hns_roce_cfg_llm_a *req_a =
- (struct hns_roce_cfg_llm_a *)desc[0].data;
- struct hns_roce_cfg_llm_b *req_b =
- (struct hns_roce_cfg_llm_b *)desc[1].data;
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hns_roce_link_table *link_tbl;
- struct hns_roce_link_table_entry *entry;
- enum hns_roce_opcode_type opcode;
- u32 page_num;
+ u32 i, next_ptr, page_num;
+ __le64 *entry = cfg_buf;
+ dma_addr_t addr;
+ u64 val;
- switch (type) {
- case TSQ_LINK_TABLE:
- link_tbl = &priv->tsq;
- opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
- break;
- case TPQ_LINK_TABLE:
- link_tbl = &priv->tpq;
- opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
- break;
- default:
- return -EINVAL;
+ page_num = data_buf->npages;
+ for (i = 0; i < page_num; i++) {
+ addr = hns_roce_buf_page(data_buf, i);
+ if (i == (page_num - 1))
+ next_ptr = 0;
+ else
+ next_ptr = i + 1;
+
+ val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
+ entry[i] = cpu_to_le64(val);
}
+}
- page_num = link_tbl->npages;
- entry = link_tbl->table.buf;
+static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *table)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ struct hns_roce_buf *buf = table->buf;
+ enum hns_roce_opcode_type opcode;
+ dma_addr_t addr;
+ opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
- req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
- req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
- roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
- CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
- roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
- CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
- roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
- CFG_LLM_INIT_EN_S, 1);
- req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
- req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
- 0);
-
- req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
- CFG_LLM_TAIL_BA_H_S,
- entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
- (entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
+ hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
+ hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
+ hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
+ hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
+ hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
+
+ addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
+ hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
+
+ addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
+ hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
- enum hns_roce_link_table_type type)
+static struct hns_roce_link_table *
+alloc_link_table_buf(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_link_table *link_tbl;
- struct hns_roce_link_table_entry *entry;
- struct device *dev = hr_dev->dev;
- u32 buf_chk_sz;
- dma_addr_t t;
- int func_num = 1;
- u32 pg_num_a;
- u32 pg_num_b;
- u32 pg_num;
- u32 size;
- int i;
-
- switch (type) {
- case TSQ_LINK_TABLE:
- link_tbl = &priv->tsq;
- buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
- pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
- pg_num_b = hr_dev->caps.sl_num * 4 + 2;
- break;
- case TPQ_LINK_TABLE:
- link_tbl = &priv->tpq;
- buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
- pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
- pg_num_b = 2 * 4 * func_num + 2;
- break;
- default:
- return -EINVAL;
+ u32 pg_shift, size, min_size;
+
+ link_tbl = &priv->ext_llm;
+ pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
+ size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
+ min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
+
+ /* Alloc data table */
+ size = max(size, min_size);
+ link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
+ if (IS_ERR(link_tbl->buf))
+ return ERR_PTR(-ENOMEM);
+
+ /* Alloc config table */
+ size = link_tbl->buf->npages * sizeof(u64);
+ link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
+ &link_tbl->table.map,
+ GFP_KERNEL);
+ if (!link_tbl->table.buf) {
+ hns_roce_buf_free(hr_dev, link_tbl->buf);
+ return ERR_PTR(-ENOMEM);
}
- pg_num = max(pg_num_a, pg_num_b);
- size = pg_num * sizeof(struct hns_roce_link_table_entry);
+ return link_tbl;
+}
- link_tbl->table.buf = dma_alloc_coherent(dev, size,
- &link_tbl->table.map,
- GFP_KERNEL);
- if (!link_tbl->table.buf)
- goto out;
+static void free_link_table_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *tbl)
+{
+ if (tbl->buf) {
+ u32 size = tbl->buf->npages * sizeof(u64);
- link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
- GFP_KERNEL);
- if (!link_tbl->pg_list)
- goto err_kcalloc_failed;
+ dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
+ tbl->table.map);
+ }
- entry = link_tbl->table.buf;
- for (i = 0; i < pg_num; ++i) {
- link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
- &t, GFP_KERNEL);
- if (!link_tbl->pg_list[i].buf)
- goto err_alloc_buf_failed;
+ hns_roce_buf_free(hr_dev, tbl->buf);
+}
- link_tbl->pg_list[i].map = t;
+static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_link_table *link_tbl;
+ int ret;
- entry[i].blk_ba0 = (u32)(t >> 12);
- entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
+ link_tbl = alloc_link_table_buf(hr_dev);
+ if (IS_ERR(link_tbl))
+ return -ENOMEM;
- if (i < (pg_num - 1))
- entry[i].blk_ba1_nxt_ptr |=
- (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
+ if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
+ ret = -EINVAL;
+ goto err_alloc;
}
- link_tbl->npages = pg_num;
- link_tbl->pg_sz = buf_chk_sz;
-
- return hns_roce_config_link_table(hr_dev, type);
-err_alloc_buf_failed:
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, buf_chk_sz,
- link_tbl->pg_list[i].buf,
- link_tbl->pg_list[i].map);
- kfree(link_tbl->pg_list);
+ config_llm_table(link_tbl->buf, link_tbl->table.buf);
+ ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
+ if (ret)
+ goto err_alloc;
-err_kcalloc_failed:
- dma_free_coherent(dev, size, link_tbl->table.buf,
- link_tbl->table.map);
+ return 0;
-out:
- return -ENOMEM;
+err_alloc:
+ free_link_table_buf(hr_dev, link_tbl);
+ return ret;
}
-static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
- struct hns_roce_link_table *link_tbl)
+static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
{
- struct device *dev = hr_dev->dev;
- int size;
- int i;
-
- size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
-
- for (i = 0; i < link_tbl->npages; ++i)
- if (link_tbl->pg_list[i].buf)
- dma_free_coherent(dev, link_tbl->pg_sz,
- link_tbl->pg_list[i].buf,
- link_tbl->pg_list[i].map);
- kfree(link_tbl->pg_list);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
- dma_free_coherent(dev, size, link_tbl->table.buf,
- link_tbl->table.map);
+ free_link_table_buf(hr_dev, &priv->ext_llm);
}
static void free_dip_list(struct hns_roce_dev *hr_dev)
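
config_llm_table() above builds a singly linked chain of data pages inside a flat configuration table: each 64-bit entry carries a page address plus the index of the next entry, with the final entry pointing to 0. A hedged standalone model of that loop; HNS_ROCE_EXT_LLM_ENTRY()'s exact bit layout is not shown in this diff, so the packing below (address in the low bits, next index shifted high) is illustrative only:

#include <linux/types.h>

/* Illustrative packing only -- the real layout is defined by
 * HNS_ROCE_EXT_LLM_ENTRY() in the driver headers.
 */
static void build_llm_chain_model(__le64 *entry, const u64 *page_addr,
				  u32 page_num)
{
	u32 i, next;

	for (i = 0; i < page_num; i++) {
		/* last entry chains back to index 0 */
		next = (i == page_num - 1) ? 0 : i + 1;
		entry[i] = cpu_to_le64(page_addr[i] | ((u64)next << 44));
	}
}
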
@@ -2736,9 +2737,13 @@ static void put_hem_table(struct hns_roce_dev *hr_dev)
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
+ /* The hns ROCEE requires the extdb info to be cleared before use */
+ ret = hns_roce_clear_extdb_list_info(hr_dev);
+ if (ret)
+ return ret;
+
ret = get_hem_table(hr_dev);
if (ret)
return ret;
@@ -2746,40 +2751,26 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
if (hr_dev->is_vf)
return 0;
- /* TSQ includes SQ doorbell and ack doorbell */
- ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
- if (ret) {
- dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
- goto err_tsq_init_failed;
- }
-
- ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
+ ret = hns_roce_init_link_table(hr_dev);
if (ret) {
- dev_err(hr_dev->dev, "failed to init TPQ, ret = %d.\n", ret);
- goto err_tpq_init_failed;
+ dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
+ goto err_llm_init_failed;
}
return 0;
-err_tsq_init_failed:
+err_llm_init_failed:
put_hem_table(hr_dev);
-err_tpq_init_failed:
- hns_roce_free_link_table(hr_dev, &priv->tpq);
-
return ret;
}
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
-
hns_roce_function_clear(hr_dev);
- if (!hr_dev->is_vf) {
- hns_roce_free_link_table(hr_dev, &priv->tpq);
- hns_roce_free_link_table(hr_dev, &priv->tsq);
- }
+ if (!hr_dev->is_vf)
+ hns_roce_free_link_table(hr_dev);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
free_dip_list(hr_dev);
@@ -3085,16 +3076,16 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
hr_reg_enable(mpt_entry, MPT_L_INV_EN);
- hr_reg_write(mpt_entry, MPT_BIND_EN,
- !!(mr->access & IB_ACCESS_MW_BIND));
- hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
- !!(mr->access & IB_ACCESS_REMOTE_ATOMIC));
- hr_reg_write(mpt_entry, MPT_RR_EN,
- !!(mr->access & IB_ACCESS_REMOTE_READ));
- hr_reg_write(mpt_entry, MPT_RW_EN,
- !!(mr->access & IB_ACCESS_REMOTE_WRITE));
- hr_reg_write(mpt_entry, MPT_LW_EN,
- !!((mr->access & IB_ACCESS_LOCAL_WRITE)));
+ hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
+ mr->access & IB_ACCESS_MW_BIND);
+ hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
+ mr->access & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_write_bool(mpt_entry, MPT_RR_EN,
+ mr->access & IB_ACCESS_REMOTE_READ);
+ hr_reg_write_bool(mpt_entry, MPT_RW_EN,
+ mr->access & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_write_bool(mpt_entry, MPT_LW_EN,
+ mr->access & IB_ACCESS_LOCAL_WRITE);
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
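
[Editor's note] Much of this patch replaces open-coded roce_set_field()/roce_set_bit() mask-and-shift calls with hr_reg_write()/hr_reg_write_bool() helpers that name each field once. A minimal user-space sketch of the idea follows; the field encodings and helper bodies are hypothetical stand-ins, not the driver's real macros:

/* Sketch of a named-field register helper in the spirit of hr_reg_write();
 * the field encodings and helper bodies below are hypothetical. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a field as (word index, high bit, low bit) in one constant. */
#define FIELD_LOC(w, h, l)      (((uint32_t)(w) << 16) | ((h) << 8) | (l))
#define MPT_PD                  FIELD_LOC(1, 23, 0)  /* hypothetical layout */
#define MPT_RR_EN               FIELD_LOC(2, 2, 2)   /* hypothetical layout */

static void reg_write(uint32_t *regs, uint32_t loc, uint32_t val)
{
        uint32_t w = loc >> 16, h = (loc >> 8) & 0xff, l = loc & 0xff;
        uint32_t mask = (h - l == 31) ? ~0u : ((1u << (h - l + 1)) - 1) << l;

        regs[w] = (regs[w] & ~mask) | ((val << l) & mask);
}

static uint32_t reg_read(const uint32_t *regs, uint32_t loc)
{
        uint32_t w = loc >> 16, h = (loc >> 8) & 0xff, l = loc & 0xff;
        uint32_t mask = (h - l == 31) ? ~0u : ((1u << (h - l + 1)) - 1) << l;

        return (regs[w] & mask) >> l;
}

int main(void)
{
        uint32_t mpt[8] = { 0 };

        reg_write(mpt, MPT_PD, 42);     /* like hr_reg_write() */
        reg_write(mpt, MPT_RR_EN, 1);   /* like hr_reg_write_bool() */
        printf("pd=%" PRIu32 " rr=%" PRIu32 "\n",
               reg_read(mpt, MPT_PD), reg_read(mpt, MPT_RR_EN));
        return 0;
}

The point of the pattern is that the word offset, mask, and shift travel together under one field name, so a call site can no longer pair the wrong mask with the wrong word.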
@@ -3261,8 +3252,8 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
	/* Get the CQE only when its owner bit differs from the MSB of cons_idx */
- return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
- !!(n & hr_cq->cq_depth)) ? cqe : NULL;
+ return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
+ NULL;
}
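
[Editor's note] get_sw_cqe_v2() above treats a CQE as valid when its owner bit differs from the MSB of the power-of-two consumer index, a bit that flips on every wrap of the ring. A self-contained sketch of that validity test, assuming a hypothetical one-byte owner field:

/* Sketch of owner-bit polling on a power-of-two ring; the one-byte owner
 * field and depth are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 8      /* must be a power of two */

struct sw_cqe {
        uint8_t owner;
};

/* Hardware toggles the owner bit each time it wraps the ring, so a CQE is
 * ready exactly when its owner bit disagrees with bit log2(depth) of ci. */
static bool cqe_ready(const struct sw_cqe *ring, uint32_t ci)
{
        const struct sw_cqe *cqe = &ring[ci & (CQ_DEPTH - 1)];

        return cqe->owner ^ !!(ci & CQ_DEPTH);
}

int main(void)
{
        struct sw_cqe ring[CQ_DEPTH] = { { .owner = 1 } };  /* pass-0 write */

        printf("pass 0 ready: %d\n", cqe_ready(ring, 0));          /* 1 */
        printf("pass 1 ready: %d\n", cqe_ready(ring, CQ_DEPTH));   /* 0 */
        return 0;
}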
static inline void update_cq_db(struct hns_roce_dev *hr_dev,
@@ -3273,14 +3264,10 @@ static inline void update_cq_db(struct hns_roce_dev *hr_dev,
} else {
struct hns_roce_v2_db cq_db = {};
- roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
- hr_cq->cqn);
- roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
- HNS_ROCE_V2_CQ_DB);
- roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
- V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
- roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
- V2_CQ_DB_CMD_SN_S, 1);
+ hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
+ hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
+ hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
+ hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
}
@@ -3308,25 +3295,18 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
*/
while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
- if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
- V2_CQE_BYTE_16_LCL_QPN_S) &
- HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
- if (srq &&
- roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
- wqe_index = roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
+ if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
+ if (srq && hr_reg_read(cqe, CQE_S_R)) {
+ wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
hns_roce_free_srq_wqe(srq, wqe_index);
}
++nfreed;
} else if (nfreed) {
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
hr_cq->ib_cq.cqe);
- owner_bit = roce_get_bit(dest->byte_4,
- V2_CQE_BYTE_4_OWNER_S);
+ owner_bit = hr_reg_read(dest, CQE_OWNER);
memcpy(dest, cqe, sizeof(*cqe));
- roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
- owner_bit);
+ hr_reg_write(dest, CQE_OWNER, owner_bit);
}
}
@@ -3353,73 +3333,44 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
- roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
- V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
- roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
- V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
- roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
- roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
- V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
-
- roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
- V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
+ hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
+ hr_reg_write(cq_context, CQC_ARM_ST, REG_NXT_CEQE);
+ hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
+ hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
+ hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
- roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
- V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
- HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
+ if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
+ hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
hr_reg_enable(cq_context, CQC_STASH);
- cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
-
- roce_set_field(cq_context->byte_16_hop_addr,
- V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
- V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts[0])));
- roce_set_field(cq_context->byte_16_hop_addr,
- V2_CQC_BYTE_16_CQE_HOP_NUM_M,
- V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
- HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
-
- cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
- roce_set_field(cq_context->byte_24_pgsz_addr,
- V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
- V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts[1])));
- roce_set_field(cq_context->byte_24_pgsz_addr,
- V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
- V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
- roce_set_field(cq_context->byte_24_pgsz_addr,
- V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
- V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
-
- cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
-
- roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
- V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
-
- roce_set_bit(cq_context->byte_44_db_record,
- V2_CQC_BYTE_44_DB_RECORD_EN_S,
- (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
-
- roce_set_field(cq_context->byte_44_db_record,
- V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
- V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
- ((u32)hr_cq->db.dma) >> 1);
- cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
-
- roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_MAX_CNT_M,
- V2_CQC_BYTE_56_CQ_MAX_CNT_S,
- HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
- roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_PERIOD_M,
- V2_CQC_BYTE_56_CQ_PERIOD_S,
- HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
+ hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts[0]));
+ hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
+ hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
+ hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts[1]));
+ hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
+ hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
+ hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
+ hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
+ hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
+ ((u32)hr_cq->db.dma) >> 1);
+ hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
+ hr_cq->db.dma >> 32);
+ hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ hr_reg_write(cq_context, CQC_CQ_PERIOD,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
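
[Editor's note] The CQC writes above split each DMA address across _L/_H field pairs, and base addresses go in pre-shifted (dma_handle >> 3), consistent with the hardware keeping only the bits above the guaranteed 8-byte alignment. A quick sketch verifying that the split/reassembly arithmetic loses nothing:

/* Sketch of storing an 8-byte-aligned 64-bit base address in two 32-bit
 * register fields, dropping the three always-zero low bits. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma_handle = 0x1234567890ull & ~7ull;  /* 8-byte aligned */

        uint32_t ba_l = (uint32_t)(dma_handle >> 3);         /* like CQC_CQE_BA_L */
        uint32_t ba_h = (uint32_t)(dma_handle >> (32 + 3));  /* like CQC_CQE_BA_H */

        /* Reassemble to verify the split loses no information. */
        uint64_t back = (((uint64_t)ba_h << 32) | ba_l) << 3;

        printf("0x%" PRIx64 " -> 0x%" PRIx64 "\n", dma_handle, back);
        return 0;
}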
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -3437,14 +3388,11 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
- roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S, hr_cq->cqn);
- roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
- HNS_ROCE_V2_CQ_DB_NOTIFY);
- roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
- V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
- roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
- V2_CQ_DB_CMD_SN_S, hr_cq->arm_sn);
- roce_set_bit(cq_db.parameter, V2_CQ_DB_NOTIFY_TYPE_S, notify_flag);
+ hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
+ hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
+ hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
+ hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
+ hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
@@ -3460,8 +3408,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
u32 sge_cnt, data_len, size;
void *wqe_buf;
- wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
@@ -3560,8 +3507,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
{ HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
};
- u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
- V2_CQE_BYTE_4_STATUS_S);
+ u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
int i;
wc->status = IB_WC_GENERAL_ERR;
@@ -3578,6 +3524,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
cq->cqe_size, false);
+ wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
/*
* For hns ROCEE, GENERAL_ERR is an error type that is not defined in
@@ -3587,17 +3534,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
return;
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
}
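
[Editor's note] The removed workaround comment and the open-coded test_and_set_bit()/init_flush_work() pair are folded into flush_cqe() here. Judging only from the removed lines, the helper presumably wraps the same schedule-once logic; a hedged user-space sketch of that behavior (not the actual flush_cqe()):

/* Hedged sketch of a once-only deferred flush, mirroring the removed
 * test_and_set_bit()/init_flush_work() pair; not the actual flush_cqe(). */
#include <stdatomic.h>
#include <stdio.h>

struct fake_qp {
        atomic_flag flush_pending;
};

static void schedule_flush_work(struct fake_qp *qp)
{
        (void)qp;
        printf("flush work queued once\n");  /* stand-in for init_flush_work() */
}

static void flush_cqe_sketch(struct fake_qp *qp)
{
        /* Only the first error CQE schedules the work; later callers see
         * the flag already set, just as test_and_set_bit() arranged. */
        if (!atomic_flag_test_and_set(&qp->flush_pending))
                schedule_flush_work(qp);
}

int main(void)
{
        struct fake_qp qp = { .flush_pending = ATOMIC_FLAG_INIT };

        flush_cqe_sketch(&qp);  /* schedules the work */
        flush_cqe_sketch(&qp);  /* no-op */
        return 0;
}

The deferral matters because, per the removed comment, conveying the QP state uses a mailbox call that can sleep, which is not allowed in the CQ polling context.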
static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
@@ -3607,9 +3544,7 @@ static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
struct hns_roce_qp *hr_qp = *cur_qp;
u32 qpn;
- qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
- V2_CQE_BYTE_16_LCL_QPN_S) &
- HNS_ROCE_V2_CQE_QPN_MASK;
+ qpn = hr_reg_read(cqe, CQE_LCL_QPN);
if (!hr_qp || qpn != hr_qp->qpn) {
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
@@ -3683,8 +3618,7 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
wc->wc_flags = 0;
- hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
switch (hr_opcode) {
case HNS_ROCE_V2_WQE_OP_RDMA_READ:
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
@@ -3716,12 +3650,11 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
struct hns_roce_v2_cqe *cqe)
{
- return wc->qp->qp_type != IB_QPT_UD &&
- wc->qp->qp_type != IB_QPT_GSI &&
+ return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
(hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
- roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S);
+ hr_reg_read(cqe, CQE_RQ_INLINE);
}
static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
@@ -3733,8 +3666,7 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
switch (hr_opcode) {
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
@@ -3761,28 +3693,21 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
return ret;
}
- wc->sl = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
- V2_CQE_BYTE_32_SL_S);
- wc->src_qp = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M,
- V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->sl = hr_reg_read(cqe, CQE_SL);
+ wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
wc->slid = 0;
- wc->wc_flags |= roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ?
- IB_WC_GRH : 0;
- wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M,
- V2_CQE_BYTE_32_PORTN_S);
+ wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
+ wc->port_num = hr_reg_read(cqe, CQE_PORTN);
wc->pkey_index = 0;
- if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
- wc->vlan_id = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M,
- V2_CQE_BYTE_28_VID_S);
+ if (hr_reg_read(cqe, CQE_VID_VLD)) {
+ wc->vlan_id = hr_reg_read(cqe, CQE_VID);
wc->wc_flags |= IB_WC_WITH_VLAN;
} else {
wc->vlan_id = 0xffff;
}
- wc->network_hdr_type = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_PORT_TYPE_M,
- V2_CQE_BYTE_28_PORT_TYPE_S);
+ wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
return 0;
}
@@ -3814,10 +3739,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->qp = &qp->ibqp;
wc->vendor_err = 0;
- wqe_idx = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
+ wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
- is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
+ is_send = !hr_reg_read(cqe, CQE_S_R);
if (is_send) {
wq = &qp->sq;
@@ -4116,38 +4040,33 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+ hr_reg_write_bool(context, QPC_RRE,
+ access_flags & IB_ACCESS_REMOTE_READ);
+ hr_reg_clear(qpc_mask, QPC_RRE);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+ hr_reg_write_bool(context, QPC_RWE,
+ access_flags & IB_ACCESS_REMOTE_WRITE);
+ hr_reg_clear(qpc_mask, QPC_RWE);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
- !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
+ hr_reg_write_bool(context, QPC_ATE,
+ access_flags & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_clear(qpc_mask, QPC_ATE);
+ hr_reg_write_bool(context, QPC_EXT_ATE,
+ access_flags & IB_ACCESS_REMOTE_ATOMIC);
+ hr_reg_clear(qpc_mask, QPC_EXT_ATE);
}
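
[Editor's note] set_access_flags() follows the convention used throughout the QPC code (and restated in the recurring comment below): write the new value into context, then clear the same field in qpc_mask, which starts as all ones, so only cleared fields carry updates. A sketch of how such a masked apply behaves on the consuming side, assuming a mask bit of 0 means "take the new value":

/* Sketch of the context/mask convention: a mask bit of 0 marks a field to
 * take from the new context; a 1 bit keeps the current value. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t masked_apply(uint32_t cur, uint32_t ctx, uint32_t mask)
{
        return (cur & mask) | (ctx & ~mask);
}

int main(void)
{
        uint32_t cur  = 0xAAAA5555;
        uint32_t ctx  = 0x0000000F;  /* new value for the low nibble */
        uint32_t mask = 0xFFFFFFF0;  /* low nibble cleared => update it */

        printf("0x%08" PRIX32 "\n", masked_apply(cur, ctx, mask));  /* 0xAAAA555F */
        return 0;
}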
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
- to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
- hr_qp->sge.sge_shift));
+ hr_reg_write(context, QPC_SGE_SHIFT,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift));
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2(hr_qp->sq.wqe_cnt));
+ hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- ilog2(hr_qp->rq.wqe_cnt));
+ hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
}
static inline int get_cqn(struct ib_cq *ib_cq)
@@ -4175,62 +4094,45 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
-
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
- V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
- roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
+ hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
- roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
- V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
+ hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
	/* When there is no VLAN, the VLAN ID must be set to 0xFFF */
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
+ hr_reg_write(context, QPC_VLAN_ID, 0xfff);
if (ibqp->qp_type == IB_QPT_XRC_TGT) {
context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
- roce_set_bit(context->byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_XRC_QP_TYPE_S, 1);
+ hr_reg_enable(context, QPC_XRC_QP_TYPE);
}
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
- roce_set_bit(context->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+ hr_reg_enable(context, QPC_RQ_RECORD_EN);
- roce_set_field(context->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
- ((u32)hr_qp->rdb.dma) >> 1);
- context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
+ hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
+ lower_32_bits(hr_qp->rdb.dma) >> 1);
+ hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
+ upper_32_bits(hr_qp->rdb.dma));
if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RQIE_S,
- !!(hr_dev->caps.flags &
- HNS_ROCE_CAP_FLAG_RQ_INLINE));
+ hr_reg_write_bool(context, QPC_RQIE,
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
- roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
+ hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
if (ibqp->srq) {
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 1);
- roce_set_field(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
- to_hr_srq(ibqp->srq)->srqn);
+ hr_reg_enable(context, QPC_SRQ_EN);
+ hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
}
- roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ hr_reg_enable(context, QPC_FRE);
- roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
+ hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
return;
@@ -4252,49 +4154,28 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, 0);
+ hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
+ hr_reg_clear(qpc_mask, QPC_TST);
- roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
+ hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
+ hr_reg_clear(qpc_mask, QPC_PD);
- roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, 0);
+ hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
+ hr_reg_clear(qpc_mask, QPC_RX_CQN);
- roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, 0);
-
- roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
- roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, 0);
+ hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
+ hr_reg_clear(qpc_mask, QPC_TX_CQN);
if (ibqp->srq) {
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 1);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 0);
- roce_set_field(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
- to_hr_srq(ibqp->srq)->srqn);
- roce_set_field(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
- }
-
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
- V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
- V2_QPC_BYTE_4_SQPN_S, 0);
+ hr_reg_enable(context, QPC_SRQ_EN);
+ hr_reg_clear(qpc_mask, QPC_SRQ_EN);
+ hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
+ hr_reg_clear(qpc_mask, QPC_SRQN);
+ }
if (attr_mask & IB_QP_DEST_QPN) {
- roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
- roce_set_field(qpc_mask->byte_56_dqpn_err,
- V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ hr_reg_write(context, QPC_DQPN, hr_qp->qpn);
+ hr_reg_clear(qpc_mask, QPC_DQPN);
}
}
@@ -4325,74 +4206,46 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
- V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
- roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
- V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
-
- roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
- V2_QPC_BYTE_12_SQ_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
- hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
- V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
-
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGE_HOP_NUM_M,
- V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
- hr_qp->sge.sge_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGE_HOP_NUM_M,
- V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
-
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_HOP_NUM_M,
- V2_QPC_BYTE_20_RQ_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
- hr_qp->rq.wqe_cnt));
-
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_HOP_NUM_M,
- V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
-
- roce_set_field(context->byte_16_buf_ba_pg_sz,
- V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
- V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
- roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
- V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
- V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
-
- roce_set_field(context->byte_16_buf_ba_pg_sz,
- V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
- V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
- roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
- V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
- V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+ hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
+
+ hr_reg_write(context, QPC_SQ_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
+ hr_qp->sq.wqe_cnt));
+ hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
+
+ hr_reg_write(context, QPC_SGE_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
+ hr_qp->sge.sge_cnt));
+ hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
+
+ hr_reg_write(context, QPC_RQ_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
+ hr_qp->rq.wqe_cnt));
+
+ hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
+
+ hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
+
+ hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
qpc_mask->rq_cur_blk_addr = 0;
- roce_set_field(context->byte_92_srq_info,
- V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts[0])));
- roce_set_field(qpc_mask->byte_92_srq_info,
- V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
+ hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
+ hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
qpc_mask->rq_nxt_blk_addr = 0;
- roce_set_field(context->byte_104_rq_sge,
- V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
- V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts[1])));
- roce_set_field(qpc_mask->byte_104_rq_sge,
- V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
- V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
return 0;
}
@@ -4431,37 +4284,26 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
- qpc_mask->sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
-
- context->sq_cur_sge_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
- roce_set_field(context->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
- qpc_mask->sq_cur_sge_blk_addr = 0;
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
-
- context->rx_sq_cur_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
- roce_set_field(context->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
- qpc_mask->rx_sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+ hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
+
+ hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
+
+ hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
+ lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
+ hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
return 0;
}
@@ -4485,12 +4327,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
- enum ib_mtu mtu;
+ enum ib_mtu ib_mtu;
u8 lp_pktn_ini;
u64 *mtts;
u8 *dmac;
u8 *smac;
u32 port;
+ int mtu;
int ret;
ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
@@ -4521,33 +4364,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL;
}
- roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
- V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
- V2_QPC_BYTE_132_TRRL_BA_S, 0);
+ hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
+ hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
qpc_mask->trrl_ba = 0;
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
- V2_QPC_BYTE_140_TRRL_BA_S,
- (u32)(trrl_ba >> (32 + 16 + 4)));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
- V2_QPC_BYTE_140_TRRL_BA_S, 0);
+ hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
+ hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
qpc_mask->irrl_ba = 0;
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
- V2_QPC_BYTE_208_IRRL_BA_S,
- irrl_ba >> (32 + 6));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
- V2_QPC_BYTE_208_IRRL_BA_S, 0);
+ hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
+ hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
- roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
- roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
+ hr_reg_enable(context, QPC_RMT_E2E);
+ hr_reg_clear(qpc_mask, QPC_RMT_E2E);
- roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
- hr_qp->sq_signal_bits);
- roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
- 0);
+ hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
+ hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
@@ -4556,73 +4389,56 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
	/* when dmac equals smac or loop_idc is 1, the packet should be looped back */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
- roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
- roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
+ hr_reg_clear(qpc_mask, QPC_LBI);
}
if (attr_mask & IB_QP_DEST_QPN) {
- roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
- roce_set_field(qpc_mask->byte_56_dqpn_err,
- V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
+ hr_reg_clear(qpc_mask, QPC_DQPN);
}
memcpy(&(context->dmac), dmac, sizeof(u32));
- roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
- V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+ hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0;
- roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
- V2_QPC_BYTE_52_DMAC_S, 0);
+ hr_reg_clear(qpc_mask, QPC_DMAC_H);
+
+ ib_mtu = get_mtu(ibqp, attr);
+ hr_qp->path_mtu = ib_mtu;
- mtu = get_mtu(ibqp, attr);
- hr_qp->path_mtu = mtu;
+ mtu = ib_mtu_enum_to_int(ib_mtu);
+ if (WARN_ON(mtu < 0))
+ return -EINVAL;
if (attr_mask & IB_QP_PATH_MTU) {
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, mtu);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
+ hr_reg_write(context, QPC_MTU, ib_mtu);
+ hr_reg_clear(qpc_mask, QPC_MTU);
}
#define MAX_LP_MSG_LEN 65536
/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
- roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
- roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
+ hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
- roce_set_field(qpc_mask->byte_172_sq_psn,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
-
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
- roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
- V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+ hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
+ hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
+
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
context->rq_rnr_timer = 0;
qpc_mask->rq_rnr_timer = 0;
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
- V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
- V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+ hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
+ hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
	/* the ROCEE sends 2^lp_sgen_ini segments at a time */
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_LP_SGEN_INI_M,
- V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_LP_SGEN_INI_M,
- V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
+ hr_reg_write(context, QPC_LP_SGEN_INI, 3);
+ hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
return 0;
}
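
[Editor's note] The loopback-message sizing in this function caps mtu * 2^lp_pktn_ini at 64 KB, i.e. lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu). A quick user-space check of the arithmetic, with ilog2 spelled out:

/* Worked example of lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu). */
#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        int mtus[] = { 256, 1024, 4096 };

        for (int i = 0; i < 3; i++)
                printf("mtu=%d -> lp_pktn_ini=%u\n", mtus[i],
                       ilog2_u32(MAX_LP_MSG_LEN / mtus[i]));
        /* 256 -> 8, 1024 -> 6, 4096 -> 4: a larger MTU needs fewer packets
         * per loopback message to stay under 64 KB. */
        return 0;
}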
@@ -4654,44 +4470,26 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 * of all fields in context are zero, we need not set them to 0 again,
 * but we should set the relevant fields of context mask to 0.
*/
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+ hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_RX_ACK_MSN_M,
- V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+ hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
- roce_set_field(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
- roce_set_field(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_IRRL_PSN_M,
- V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+ hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
+ hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
+ hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+ hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
- roce_set_bit(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+ hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
- V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+ hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
- V2_QPC_BYTE_212_LSN_S, 0x100);
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
- V2_QPC_BYTE_212_LSN_S, 0);
+ hr_reg_write(context, QPC_LSN, 0x100);
+ hr_reg_clear(qpc_mask, QPC_LSN);
- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
- V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+ hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
return 0;
}
@@ -4758,6 +4556,11 @@ enum {
DIP_VALID,
};
+enum {
+ WND_LIMIT,
+ WND_UNLIMIT,
+};
+
static int check_cong_type(struct ib_qp *ibqp,
struct hns_roce_congestion_algorithm *cong_alg)
{
@@ -4769,21 +4572,25 @@ static int check_cong_type(struct ib_qp *ibqp,
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
break;
case CONG_TYPE_LDCP:
cong_alg->alg_sel = CONG_WINDOW;
cong_alg->alg_sub_sel = CONG_LDCP;
cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_UNLIMIT;
break;
case CONG_TYPE_HC3:
cong_alg->alg_sel = CONG_WINDOW;
cong_alg->alg_sub_sel = CONG_HC3;
cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
break;
case CONG_TYPE_DIP:
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_VALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
break;
default:
ibdev_err(&hr_dev->ib_dev,
@@ -4816,14 +4623,17 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
- hr_reg_write(qpc_mask, QPC_CONG_ALGO_TMPL_ID, 0);
+ hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
- hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SEL, 0);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
cong_field.alg_sub_sel);
- hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
- hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
+ hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
+ cong_field.wnd_mode_sel);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
/* if dip is disabled, there is no need to set dip idx */
if (cong_field.dip_vld == 0)
@@ -4878,20 +4688,14 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
/* Only HIP08 needs to set the vlan_en bits in QPC */
if (vlan_id < VLAN_N_VID &&
hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
- roce_set_bit(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+ hr_reg_enable(context, QPC_RQ_VLAN_EN);
+ hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
+ hr_reg_enable(context, QPC_SQ_VLAN_EN);
+ hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
}
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, 0);
+ hr_reg_write(context, QPC_VLAN_ID, vlan_id);
+ hr_reg_clear(qpc_mask, QPC_VLAN_ID);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
@@ -4904,39 +4708,28 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return -EINVAL;
}
- roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
- V2_QPC_BYTE_52_UDPSPN_S,
- is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
- attr->dest_qp_num) : 0);
+ hr_reg_write(context, QPC_UDPSPN,
+ is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) : 0);
- roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
- V2_QPC_BYTE_52_UDPSPN_S, 0);
+ hr_reg_clear(qpc_mask, QPC_UDPSPN);
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
- grh->sgid_index);
+ hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+ hr_reg_clear(qpc_mask, QPC_GMV_IDX);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+ hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
+ hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
ret = fill_cong_field(ibqp, attr, context, qpc_mask);
if (ret)
return ret;
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, 0);
+ hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
+ hr_reg_clear(qpc_mask, QPC_TC);
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, grh->flow_label);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, 0);
+ hr_reg_write(context, QPC_FL, grh->flow_label);
+ hr_reg_clear(qpc_mask, QPC_FL);
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
@@ -4948,10 +4741,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return -EINVAL;
}
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, hr_qp->sl);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
return 0;
}
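
[Editor's note] For the QPC_UDPSPN write above: RoCEv2 implementations commonly derive the UDP source port from the flow label (or, when it is absent, from the QP numbers) so ECMP fabrics can spread flows, while pinning the result to the high dynamic range. The folding below is purely illustrative and is not the driver's get_udp_sport():

/* Purely illustrative derivation of a RoCEv2 UDP source port; this is not
 * the driver's get_udp_sport(). */
#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_udp_sport(uint32_t flow_label, uint32_t lqpn,
                                 uint32_t rqpn)
{
        uint32_t entropy = flow_label ? flow_label : (lqpn ^ rqpn);

        /* Fold into 14 bits and pin to 0xC000..0xFFFF so the result stays
         * clear of well-known ports while still varying per flow. */
        return (uint16_t)(0xC000 | (entropy & 0x3FFF));
}

int main(void)
{
        printf("sport=0x%04X\n", sketch_udp_sport(0, 0x12, 0x34));
        printf("sport=0x%04X\n", sketch_udp_sport(0xABCDE, 0, 0));
        return 0;
}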
@@ -5033,12 +4824,8 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
if (attr_mask & IB_QP_TIMEOUT) {
if (attr->timeout < 31) {
- roce_set_field(context->byte_28_at_fl,
- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
- attr->timeout);
- roce_set_field(qpc_mask->byte_28_at_fl,
- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
- 0);
+ hr_reg_write(context, QPC_AT, attr->timeout);
+ hr_reg_clear(qpc_mask, QPC_AT);
} else {
ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 30.\n");
@@ -5046,128 +4833,68 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_RETRY_CNT) {
- roce_set_field(context->byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
- attr->retry_cnt);
- roce_set_field(qpc_mask->byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
-
- roce_set_field(context->byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+ hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
+ hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
+
+ hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
+ hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
}
if (attr_mask & IB_QP_RNR_RETRY) {
- roce_set_field(context->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
- V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
- roce_set_field(qpc_mask->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
- V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+ hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
+ hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
- roce_set_field(context->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
- roce_set_field(qpc_mask->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S, 0);
+ hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
+ hr_reg_clear(qpc_mask, QPC_RNR_CNT);
}
if (attr_mask & IB_QP_SQ_PSN) {
- roce_set_field(context->byte_172_sq_psn,
- V2_QPC_BYTE_172_SQ_CUR_PSN_M,
- V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_172_sq_psn,
- V2_QPC_BYTE_172_SQ_CUR_PSN_M,
- V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
-
- roce_set_field(context->byte_196_sq_psn,
- V2_QPC_BYTE_196_SQ_MAX_PSN_M,
- V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_196_sq_psn,
- V2_QPC_BYTE_196_SQ_MAX_PSN_M,
- V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
-
- roce_set_field(context->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
-
- roce_set_field(context->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
- attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
- roce_set_field(qpc_mask->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
-
- roce_set_field(context->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
- attr->sq_psn);
- roce_set_field(qpc_mask->byte_224_retry_msg,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
-
- roce_set_field(context->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
- V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
- roce_set_field(qpc_mask->byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
- V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+ hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
+
+ hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
+
+ hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
+
+ hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
+ attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
+
+ hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
+
+ hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
+ hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
}
if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
attr->max_dest_rd_atomic) {
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- fls(attr->max_dest_rd_atomic - 1));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
+ hr_reg_write(context, QPC_RR_MAX,
+ fls(attr->max_dest_rd_atomic - 1));
+ hr_reg_clear(qpc_mask, QPC_RR_MAX);
}
if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- fls(attr->max_rd_atomic - 1));
- roce_set_field(qpc_mask->byte_208_irrl,
- V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
+ hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
+ hr_reg_clear(qpc_mask, QPC_SR_MAX);
}
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
- roce_set_field(context->byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
- V2_QPC_BYTE_80_MIN_RNR_TIME_S,
- attr->min_rnr_timer);
- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
- V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+ hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
+ hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
}
if (attr_mask & IB_QP_RQ_PSN) {
- roce_set_field(context->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
- V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
- V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+ hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
+ hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
- roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
- roce_set_field(qpc_mask->byte_152_raq,
- V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
+ hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
}
if (attr_mask & IB_QP_QKEY) {
@@ -5220,6 +4947,32 @@ static void clear_qp(struct hns_roce_qp *hr_qp)
hr_qp->next_sge = 0;
}
+static void v2_set_flushed_fields(struct ib_qp *ibqp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ unsigned long sq_flag = 0;
+ unsigned long rq_flag = 0;
+
+ if (ibqp->qp_type == IB_QPT_XRC_TGT)
+ return;
+
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
+ hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
+ hr_qp->state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
+
+ if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
+ return;
+
+ spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
+ hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
+}
+
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
@@ -5231,8 +4984,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context = ctx;
struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned long sq_flag = 0;
- unsigned long rq_flag = 0;
int ret;
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
@@ -5253,34 +5004,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
/* When QP state is err, SQ and RQ WQE should be flushed */
- if (new_state == IB_QPS_ERR) {
- if (ibqp->qp_type != IB_QPT_XRC_TGT) {
- spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
- hr_qp->state = IB_QPS_ERR;
- roce_set_field(context->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
- hr_qp->sq.head);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
- }
-
- if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC_INI &&
- ibqp->qp_type != IB_QPT_XRC_TGT) {
- spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
- hr_qp->state = IB_QPS_ERR;
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
- hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
- spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
- }
- }
+ if (new_state == IB_QPS_ERR)
+ v2_set_flushed_fields(ibqp, context, qpc_mask);
/* Configure the optional fields */
ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
@@ -5288,17 +5013,14 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
if (ret)
goto out;
- roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
- ((to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) ||
- ibqp->srq) ? 1 : 0);
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_INV_CREDIT_S, 0);
+ hr_reg_write_bool(context, QPC_INV_CREDIT,
+ to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
+ ibqp->srq);
+ hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
	/* Every state migration must change the QP state */
- roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
- V2_QPC_BYTE_60_QP_ST_S, new_state);
- roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
- V2_QPC_BYTE_60_QP_ST_S, 0);
+ hr_reg_write(context, QPC_QP_ST, new_state);
+ hr_reg_clear(qpc_mask, QPC_QP_ST);
/* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
@@ -5388,8 +5110,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto out;
}
- state = roce_get_field(context.byte_60_qpst_tempid,
- V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
+ state = hr_reg_read(&context, QPC_QP_ST);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
ibdev_err(ibdev, "Illegal ib_qp_state\n");
@@ -5398,77 +5119,45 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
}
hr_qp->state = (u8)tmp_qp_state;
qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
- qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
- V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S);
+ qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
- qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
- V2_QPC_BYTE_108_RX_REQ_EPSN_S);
- qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
- V2_QPC_BYTE_172_SQ_CUR_PSN_M,
- V2_QPC_BYTE_172_SQ_CUR_PSN_S);
- qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
- V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S);
- qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
- ((roce_get_bit(context.byte_76_srqn_op_en,
- V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
- ((roce_get_bit(context.byte_76_srqn_op_en,
- V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
+ qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
+ qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
+ qp_attr->dest_qp_num = (u8)hr_reg_read(&context, QPC_DQPN);
+ qp_attr->qp_access_flags =
+ ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
+ ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
+ ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
struct ib_global_route *grh =
- rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+ rdma_ah_retrieve_grh(&qp_attr->ah_attr);
rdma_ah_set_sl(&qp_attr->ah_attr,
- roce_get_field(context.byte_28_at_fl,
- V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S));
- grh->flow_label = roce_get_field(context.byte_28_at_fl,
- V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S);
- grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M,
- V2_QPC_BYTE_20_SGID_IDX_S);
- grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
- V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S);
- grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
- V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S);
+ hr_reg_read(&context, QPC_SL));
+ grh->flow_label = hr_reg_read(&context, QPC_FL);
+ grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
+ grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
+ grh->traffic_class = hr_reg_read(&context, QPC_TC);
memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
}
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
- V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S);
- qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
- V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S);
-
- qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
- V2_QPC_BYTE_80_MIN_RNR_TIME_S);
- qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
- V2_QPC_BYTE_28_AT_M,
- V2_QPC_BYTE_28_AT_S);
- qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
- V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
- qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
- V2_QPC_BYTE_244_RNR_NUM_INIT_S);
+ qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
+ qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
+
+ qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
+ qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
+ qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -5476,14 +5165,14 @@ done:
qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
- if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- } else {
- qp_attr->cap.max_send_wr = 0;
- qp_attr->cap.max_send_sge = 0;
- }
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
qp_init_attr->cap = qp_attr->cap;
qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
@@ -5689,8 +5378,8 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
}
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
- hr_reg_write(ctx, SRQC_SRQ_TYPE,
- !!(srq->ibsrq.srq_type == IB_SRQT_XRC));
+ hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
+ srq->ibsrq.srq_type == IB_SRQT_XRC);
hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
@@ -5744,12 +5433,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
memset(srqc_mask, 0xff, sizeof(*srqc_mask));
- roce_set_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
- roce_set_field(srqc_mask->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+ hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
+ hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
HNS_ROCE_CMD_MODIFY_SRQC,
@@ -5772,7 +5457,6 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_srq_context *srq_context;
struct hns_roce_cmd_mailbox *mailbox;
- int limit_wl;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -5790,11 +5474,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
goto out;
}
- limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S);
-
- attr->srq_limit = limit_wl;
+ attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
attr->max_wr = srq->wqe_cnt;
attr->max_sge = srq->max_gs - srq->rsv_sge;
@@ -5821,18 +5501,10 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
memset(cqc_mask, 0xff, sizeof(*cqc_mask));
- roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
- cq_count);
- roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
- 0);
- roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
- cq_period);
- roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
- V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
- 0);
+ hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
+ hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+ hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
+ hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
HNS_ROCE_CMD_MODIFY_CQC,
@@ -5933,22 +5605,20 @@ static void update_eq_db(struct hns_roce_eq *eq)
struct hns_roce_v2_db eq_db = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
- roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
- eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
- HNS_ROCE_EQ_DB_CMD_AEQ :
- HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ hr_reg_write(&eq_db, EQ_DB_CMD,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
} else {
- roce_set_field(eq_db.byte_4, V2_EQ_DB_TAG_M, V2_EQ_DB_TAG_S,
- eq->eqn);
+ hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
- roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
- eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
- HNS_ROCE_EQ_DB_CMD_CEQ :
- HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ hr_reg_write(&eq_db, EQ_DB_CMD,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
}
- roce_set_field(eq_db.parameter, V2_EQ_DB_CONS_IDX_M,
- V2_EQ_DB_CONS_IDX_S, eq->cons_index);
+ hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}
@@ -6240,8 +5910,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
- hr_reg_write(eqc, EQC_EQE_SIZE,
- !!(eq->eqe_size == HNS_ROCE_V3_EQE_SIZE));
+ hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);
return 0;
}
@@ -6256,14 +5925,14 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
else
eq->hop_num = hr_dev->caps.eqe_hop_num;
- buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = eq->entries * eq->eqe_size;
buf_attr.region[0].hopnum = eq->hop_num;
buf_attr.region_count = 1;
err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
- hr_dev->caps.eqe_ba_pg_sz +
- HNS_HW_PAGE_SHIFT, NULL, 0);
+ hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
+ 0);
if (err)
dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
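The conversions in the hunks above all follow the same shape: roce_get_field()/roce_set_field(), which need a specific dword plus separate shift (_S) and mask (_M) constants, give way to hr_reg_read()/hr_reg_write(), which locate a field purely by the absolute (high, low) bit pair carried in its FIELD_LOC definition. A minimal sketch of that addressing scheme, assuming the field fits inside a single __le32 word of the context (the real helpers in hns_roce_common.h differ in detail):

#include <linux/bits.h>

/* Illustrative only: turn an absolute (high, low) bit pair into a
 * word index, in-word mask and shift on a little-endian context.
 */
static inline u32 example_reg_read(const __le32 *ctx, u32 h, u32 l)
{
	u32 word = le32_to_cpu(ctx[l / 32]);

	return (word & GENMASK(h % 32, l % 32)) >> (l % 32);
}

static inline void example_reg_write(__le32 *ctx, u32 h, u32 l, u32 val)
{
	u32 mask = GENMASK(h % 32, l % 32);
	u32 word = le32_to_cpu(ctx[l / 32]);

	ctx[l / 32] = cpu_to_le32((word & ~mask) | ((val << (l % 32)) & mask));
}

The hr_reg_clear() calls pair with the memset(mask, 0xff, ...) pattern in the modify_srq/modify_cq hunks: a field the firmware should actually update is written in the context and zeroed in the all-ones mask copy.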
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index a2100a629859..b8a09d411e2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -59,7 +59,7 @@
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
-#define HNS_ROCE_V2_MAX_SQ_INL_EXT 0x400
+#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -93,6 +93,9 @@
#define HNS_ROCE_V3_SCCC_SZ 64
#define HNS_ROCE_V3_GMV_ENTRY_SZ 32
+#define HNS_ROCE_V2_EXT_LLM_ENTRY_SZ 8
+#define HNS_ROCE_V2_EXT_LLM_MAX_DEPTH 4096
+
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
@@ -162,6 +165,11 @@ enum {
REG_NXT_SE_CEQE = 0x3
};
+enum {
+ CQE_SIZE_32B = 0x0,
+ CQE_SIZE_64B = 0x1
+};
+
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
@@ -170,8 +178,6 @@ enum {
#define GID_LEN_V2 16
-#define HNS_ROCE_V2_CQE_QPN_MASK 0xfffff
-
enum {
HNS_ROCE_V2_WQE_OP_SEND = 0x0,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV = 0x1,
@@ -234,7 +240,6 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
- HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
@@ -248,9 +253,11 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CLR_SCCC = 0x8509,
HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO = 0x850d,
HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
+ HNS_ROCE_OPC_EXT_CFG = 0x8512,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -304,67 +311,24 @@ struct hns_roce_v2_cq_context {
#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
-#define V2_CQC_BYTE_4_CQ_ST_S 0
-#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
-
-#define V2_CQC_BYTE_4_POLL_S 2
-
-#define V2_CQC_BYTE_4_SE_S 3
-
-#define V2_CQC_BYTE_4_OVER_IGNORE_S 4
-
-#define V2_CQC_BYTE_4_COALESCE_S 5
-
#define V2_CQC_BYTE_4_ARM_ST_S 6
#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6)
-#define V2_CQC_BYTE_4_SHIFT_S 8
-#define V2_CQC_BYTE_4_SHIFT_M GENMASK(12, 8)
-
-#define V2_CQC_BYTE_4_CMD_SN_S 13
-#define V2_CQC_BYTE_4_CMD_SN_M GENMASK(14, 13)
-
#define V2_CQC_BYTE_4_CEQN_S 15
#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15)
-#define V2_CQC_BYTE_4_PAGE_OFFSET_S 24
-#define V2_CQC_BYTE_4_PAGE_OFFSET_M GENMASK(31, 24)
-
#define V2_CQC_BYTE_8_CQN_S 0
#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
-#define V2_CQC_BYTE_8_CQE_SIZE_S 27
-#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27)
-
-#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
-#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
-
#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30
#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30)
-#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S 0
-#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_S 24
-#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_M GENMASK(27, 24)
-
-#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S 28
-#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M GENMASK(31, 28)
-
#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0
#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0)
#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0
#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0)
-#define V2_CQC_BYTE_40_CQE_BA_S 0
-#define V2_CQC_BYTE_40_CQE_BA_M GENMASK(28, 0)
-
-#define V2_CQC_BYTE_44_DB_RECORD_EN_S 0
-
-#define V2_CQC_BYTE_44_DB_RECORD_ADDR_S 1
-#define V2_CQC_BYTE_44_DB_RECORD_ADDR_M GENMASK(31, 1)
-
#define V2_CQC_BYTE_52_CQE_CNT_S 0
#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
@@ -374,30 +338,48 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_56_CQ_PERIOD_S 16
#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16)
-#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
-#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
-
#define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l)
+#define CQC_CQ_ST CQC_FIELD_LOC(1, 0)
+#define CQC_POLL CQC_FIELD_LOC(2, 2)
+#define CQC_SE CQC_FIELD_LOC(3, 3)
+#define CQC_OVER_IGNORE CQC_FIELD_LOC(4, 4)
+#define CQC_ARM_ST CQC_FIELD_LOC(7, 6)
+#define CQC_SHIFT CQC_FIELD_LOC(12, 8)
+#define CQC_CMD_SN CQC_FIELD_LOC(14, 13)
+#define CQC_CEQN CQC_FIELD_LOC(23, 15)
+#define CQC_CQN CQC_FIELD_LOC(55, 32)
+#define CQC_POE_EN CQC_FIELD_LOC(56, 56)
+#define CQC_POE_NUM CQC_FIELD_LOC(58, 57)
+#define CQC_CQE_SIZE CQC_FIELD_LOC(60, 59)
+#define CQC_CQ_CNT_MODE CQC_FIELD_LOC(61, 61)
#define CQC_STASH CQC_FIELD_LOC(63, 63)
+#define CQC_CQE_CUR_BLK_ADDR_L CQC_FIELD_LOC(95, 64)
+#define CQC_CQE_CUR_BLK_ADDR_H CQC_FIELD_LOC(115, 96)
+#define CQC_POE_QID CQC_FIELD_LOC(125, 116)
+#define CQC_CQE_HOP_NUM CQC_FIELD_LOC(127, 126)
+#define CQC_CQE_NEX_BLK_ADDR_L CQC_FIELD_LOC(159, 128)
+#define CQC_CQE_NEX_BLK_ADDR_H CQC_FIELD_LOC(179, 160)
+#define CQC_CQE_BAR_PG_SZ CQC_FIELD_LOC(187, 184)
+#define CQC_CQE_BUF_PG_SZ CQC_FIELD_LOC(191, 188)
+#define CQC_CQ_PRODUCER_IDX CQC_FIELD_LOC(215, 192)
+#define CQC_CQ_CONSUMER_IDX CQC_FIELD_LOC(247, 224)
+#define CQC_CQE_BA_L CQC_FIELD_LOC(287, 256)
+#define CQC_CQE_BA_H CQC_FIELD_LOC(316, 288)
+#define CQC_POE_QID_H_0 CQC_FIELD_LOC(319, 317)
+#define CQC_DB_RECORD_EN CQC_FIELD_LOC(320, 320)
+#define CQC_CQE_DB_RECORD_ADDR_L CQC_FIELD_LOC(351, 321)
+#define CQC_CQE_DB_RECORD_ADDR_H CQC_FIELD_LOC(383, 352)
+#define CQC_CQE_CNT CQC_FIELD_LOC(407, 384)
+#define CQC_CQ_MAX_CNT CQC_FIELD_LOC(431, 416)
+#define CQC_CQ_PERIOD CQC_FIELD_LOC(447, 432)
+#define CQC_CQE_REPORT_TIMER CQC_FIELD_LOC(471, 448)
+#define CQC_WR_CQE_IDX CQC_FIELD_LOC(479, 472)
+#define CQC_SE_CQE_IDX CQC_FIELD_LOC(503, 480)
+#define CQC_POE_QID_H_1 CQC_FIELD_LOC(511, 511)
struct hns_roce_srq_context {
- __le32 byte_4_srqn_srqst;
- __le32 byte_8_limit_wl;
- __le32 byte_12_xrcd;
- __le32 byte_16_pi_ci;
- __le32 wqe_bt_ba;
- __le32 byte_24_wqe_bt_ba;
- __le32 byte_28_rqws_pd;
- __le32 idx_bt_ba;
- __le32 rsv_idx_bt_ba;
- __le32 idx_cur_blk_addr;
- __le32 byte_44_idxbufpgsz_addr;
- __le32 idx_nxt_blk_addr;
- __le32 rsv_idxnxtblkaddr;
- __le32 byte_56_xrc_cqn;
- __le32 db_record_addr_record_en;
- __le32 db_record_addr;
+ __le32 data[16];
};
#define SRQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_srq_context, h, l)
@@ -438,71 +420,6 @@ struct hns_roce_srq_context {
#define SRQC_DB_RECORD_ADDR_L SRQC_FIELD_LOC(479, 449)
#define SRQC_DB_RECORD_ADDR_H SRQC_FIELD_LOC(511, 480)
-#define SRQC_BYTE_4_SRQ_ST_S 0
-#define SRQC_BYTE_4_SRQ_ST_M GENMASK(1, 0)
-
-#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S 2
-#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M GENMASK(3, 2)
-
-#define SRQC_BYTE_4_SRQ_SHIFT_S 4
-#define SRQC_BYTE_4_SRQ_SHIFT_M GENMASK(7, 4)
-
-#define SRQC_BYTE_4_SRQN_S 8
-#define SRQC_BYTE_4_SRQN_M GENMASK(31, 8)
-
-#define SRQC_BYTE_8_SRQ_LIMIT_WL_S 0
-#define SRQC_BYTE_8_SRQ_LIMIT_WL_M GENMASK(15, 0)
-
-#define SRQC_BYTE_12_SRQ_XRCD_S 0
-#define SRQC_BYTE_12_SRQ_XRCD_M GENMASK(23, 0)
-
-#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_S 0
-#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_M GENMASK(15, 0)
-
-#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_S 0
-#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_M GENMASK(31, 16)
-
-#define SRQC_BYTE_24_SRQ_WQE_BT_BA_S 0
-#define SRQC_BYTE_24_SRQ_WQE_BT_BA_M GENMASK(28, 0)
-
-#define SRQC_BYTE_28_PD_S 0
-#define SRQC_BYTE_28_PD_M GENMASK(23, 0)
-
-#define SRQC_BYTE_28_RQWS_S 24
-#define SRQC_BYTE_28_RQWS_M GENMASK(27, 24)
-
-#define SRQC_BYTE_36_SRQ_IDX_BT_BA_S 0
-#define SRQC_BYTE_36_SRQ_IDX_BT_BA_M GENMASK(28, 0)
-
-#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S 0
-#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M GENMASK(19, 0)
-
-#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S 22
-#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M GENMASK(23, 22)
-
-#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S 24
-#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M GENMASK(27, 24)
-
-#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S 28
-#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M GENMASK(31, 28)
-
-#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S 0
-#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M GENMASK(19, 0)
-
-#define SRQC_BYTE_56_SRQ_XRC_CQN_S 0
-#define SRQC_BYTE_56_SRQ_XRC_CQN_M GENMASK(23, 0)
-
-#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S 24
-#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M GENMASK(27, 24)
-
-#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S 28
-#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M GENMASK(31, 28)
-
-#define SRQC_BYTE_60_SRQ_RECORD_EN_S 0
-
-#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_S 1
-#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_M GENMASK(31, 1)
-
enum {
V2_MPT_ST_VALID = 0x1,
V2_MPT_ST_FREE = 0x2,
@@ -590,372 +507,192 @@ struct hns_roce_v2_qp_context {
#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
+#define QPC_TST QPC_FIELD_LOC(2, 0)
+#define QPC_SGE_SHIFT QPC_FIELD_LOC(7, 3)
+#define QPC_CNP_TIMER QPC_FIELD_LOC(31, 8)
+#define QPC_WQE_SGE_BA_L QPC_FIELD_LOC(63, 32)
+#define QPC_WQE_SGE_BA_H QPC_FIELD_LOC(92, 64)
+#define QPC_SQ_HOP_NUM QPC_FIELD_LOC(94, 93)
+#define QPC_CIRE_EN QPC_FIELD_LOC(95, 95)
+#define QPC_WQE_SGE_BA_PG_SZ QPC_FIELD_LOC(99, 96)
+#define QPC_WQE_SGE_BUF_PG_SZ QPC_FIELD_LOC(103, 100)
+#define QPC_PD QPC_FIELD_LOC(127, 104)
+#define QPC_RQ_HOP_NUM QPC_FIELD_LOC(129, 128)
+#define QPC_SGE_HOP_NUM QPC_FIELD_LOC(131, 130)
+#define QPC_RQWS QPC_FIELD_LOC(135, 132)
+#define QPC_SQ_SHIFT QPC_FIELD_LOC(139, 136)
+#define QPC_RQ_SHIFT QPC_FIELD_LOC(143, 140)
+#define QPC_GMV_IDX QPC_FIELD_LOC(159, 144)
+#define QPC_HOPLIMIT QPC_FIELD_LOC(167, 160)
+#define QPC_TC QPC_FIELD_LOC(175, 168)
+#define QPC_VLAN_ID QPC_FIELD_LOC(187, 176)
+#define QPC_MTU QPC_FIELD_LOC(191, 188)
+#define QPC_FL QPC_FIELD_LOC(211, 192)
+#define QPC_SL QPC_FIELD_LOC(215, 212)
+#define QPC_CNP_TX_FLAG QPC_FIELD_LOC(216, 216)
+#define QPC_CE_FLAG QPC_FIELD_LOC(217, 217)
+#define QPC_LBI QPC_FIELD_LOC(218, 218)
+#define QPC_AT QPC_FIELD_LOC(223, 219)
+#define QPC_DGID QPC_FIELD_LOC(351, 224)
+#define QPC_DMAC_L QPC_FIELD_LOC(383, 352)
+#define QPC_DMAC_H QPC_FIELD_LOC(399, 384)
+#define QPC_UDPSPN QPC_FIELD_LOC(415, 400)
+#define QPC_DQPN QPC_FIELD_LOC(439, 416)
+#define QPC_SQ_TX_ERR QPC_FIELD_LOC(440, 440)
+#define QPC_SQ_RX_ERR QPC_FIELD_LOC(441, 441)
+#define QPC_RQ_TX_ERR QPC_FIELD_LOC(442, 442)
+#define QPC_RQ_RX_ERR QPC_FIELD_LOC(443, 443)
+#define QPC_LP_PKTN_INI QPC_FIELD_LOC(447, 444)
#define QPC_CONG_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448)
-
-#define V2_QPC_BYTE_4_TST_S 0
-#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
-
-#define V2_QPC_BYTE_4_SGE_SHIFT_S 3
-#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
-
-#define V2_QPC_BYTE_4_SQPN_S 8
-#define V2_QPC_BYTE_4_SQPN_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_12_WQE_SGE_BA_S 0
-#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
-
-#define V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
-#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
-
-#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
-
-#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
-#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
-
-#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
-#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
-
-#define V2_QPC_BYTE_16_PD_S 8
-#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
-#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
-
-#define V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
-#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
-
-#define V2_QPC_BYTE_20_RQWS_S 4
-#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
-
-#define V2_QPC_BYTE_20_SQ_SHIFT_S 8
-#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
-
-#define V2_QPC_BYTE_20_RQ_SHIFT_S 12
-#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
-
-#define V2_QPC_BYTE_20_SGID_IDX_S 16
-#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
-
-#define V2_QPC_BYTE_20_SMAC_IDX_S 24
-#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_24_HOP_LIMIT_S 0
-#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
-
-#define V2_QPC_BYTE_24_TC_S 8
-#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
-
-#define V2_QPC_BYTE_24_VLAN_ID_S 16
-#define V2_QPC_BYTE_24_VLAN_ID_M GENMASK(27, 16)
-
-#define V2_QPC_BYTE_24_MTU_S 28
-#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
-
-#define V2_QPC_BYTE_28_FL_S 0
-#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_28_SL_S 20
-#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
-
-#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
-
-#define V2_QPC_BYTE_28_CE_FLAG_S 25
-
-#define V2_QPC_BYTE_28_LBI_S 26
-
-#define V2_QPC_BYTE_28_AT_S 27
-#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
-
-#define V2_QPC_BYTE_52_DMAC_S 0
-#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_52_UDPSPN_S 16
-#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_56_DQPN_S 0
-#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_56_SQ_TX_ERR_S 24
-#define V2_QPC_BYTE_56_SQ_RX_ERR_S 25
-#define V2_QPC_BYTE_56_RQ_TX_ERR_S 26
-#define V2_QPC_BYTE_56_RQ_RX_ERR_S 27
-
-#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
-#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-
-#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
-#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
-
-#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27
-
-#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28
-
-#define V2_QPC_BYTE_60_QP_ST_S 29
-#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
-
-#define V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
-
-#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
-#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
-
-#define V2_QPC_BYTE_76_SRQN_S 0
-#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_76_SRQ_EN_S 24
-
-#define V2_QPC_BYTE_76_RRE_S 25
-
-#define V2_QPC_BYTE_76_RWE_S 26
-
-#define V2_QPC_BYTE_76_ATE_S 27
-
-#define V2_QPC_BYTE_76_RQIE_S 28
-#define V2_QPC_BYTE_76_EXT_ATE_S 29
-#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
-#define V2_QPC_BYTE_80_RX_CQN_S 0
-#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_80_XRC_QP_TYPE_S 24
-
-#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
-#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
-
-#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
-#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
-#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
-#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_92_SRQ_INFO_S 20
-#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
-
-#define V2_QPC_BYTE_96_RX_REQ_MSN_S 0
-#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
-#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
-#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_108_INV_CREDIT_S 0
-
-#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
-
-#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
-#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
-
-#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
-
-#define V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
-#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
-#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
-
-#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
-#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
-
-#define V2_QPC_BYTE_132_TRRL_BA_S 16
-#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_140_TRRL_BA_S 0
-#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
-
-#define V2_QPC_BYTE_140_RR_MAX_S 12
-#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
-
-#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15
-
-#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
-#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
-
-#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
-#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
-#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
-#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
-
-#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
-
-#define V2_QPC_BYTE_148_RQ_MSN_S 0
-#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
-#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_152_RAQ_PSN_S 0
-#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
-#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
-#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
-#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
-#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
-#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
-
-#define V2_QPC_BYTE_168_SQ_INVLD_FLG_S 21
-
-#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
-#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
-
-#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24
-#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25
-#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26
-#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27
-#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
-#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
-
-#define V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
-#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
-
-#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
-
-#define V2_QPC_BYTE_172_FRE_S 7
-
-#define V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
-#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
-#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
-#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
-#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
-#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
-
-#define V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
-#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
-#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
-
-#define V2_QPC_BYTE_196_IRRL_HEAD_S 0
-#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
-
-#define V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
-#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
-#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
-#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_208_IRRL_BA_S 0
-#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
-
-#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
-
-#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
-
-#define V2_QPC_BYTE_208_RMT_E2E_S 28
-
-#define V2_QPC_BYTE_208_SR_MAX_S 29
-#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
-
-#define V2_QPC_BYTE_212_LSN_S 0
-#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
-#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
-
-#define V2_QPC_BYTE_212_CHECK_FLG_S 27
-#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
-
-#define V2_QPC_BYTE_212_RETRY_CNT_S 29
-#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
-
-#define V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
-#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
-#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
-#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
-
-#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
-#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
-
-#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
-#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
-
-#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
-#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
-
-#define V2_QPC_BYTE_232_SO_LP_VLD_S 29
-#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30
-#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31
-
-#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
-#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
-
-#define V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
-#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
-
-#define V2_QPC_BYTE_240_RX_ACK_MSN_S 16
-#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
-
-#define V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
-#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
-#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
-
-#define V2_QPC_BYTE_244_RNR_CNT_S 27
-#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
-
-#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30
-#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31
-
-#define V2_QPC_BYTE_248_IRRL_PSN_S 0
-#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
-
-#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
-#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
-
-#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
-
-#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
-
-#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
-
-#define V2_QPC_BYTE_252_TX_CQN_S 0
-#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
-
-#define V2_QPC_BYTE_252_SIG_TYPE_S 24
-
-#define V2_QPC_BYTE_252_ERR_TYPE_S 25
-#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
-
-#define V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
-#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
-
-#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
-#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
+#define QPC_SCC_TOKEN QPC_FIELD_LOC(474, 456)
+#define QPC_SQ_DB_DOING QPC_FIELD_LOC(475, 475)
+#define QPC_RQ_DB_DOING QPC_FIELD_LOC(476, 476)
+#define QPC_QP_ST QPC_FIELD_LOC(479, 477)
+#define QPC_QKEY_XRCD QPC_FIELD_LOC(511, 480)
+#define QPC_RQ_RECORD_EN QPC_FIELD_LOC(512, 512)
+#define QPC_RQ_DB_RECORD_ADDR_L QPC_FIELD_LOC(543, 513)
+#define QPC_RQ_DB_RECORD_ADDR_H QPC_FIELD_LOC(575, 544)
+#define QPC_SRQN QPC_FIELD_LOC(599, 576)
+#define QPC_SRQ_EN QPC_FIELD_LOC(600, 600)
+#define QPC_RRE QPC_FIELD_LOC(601, 601)
+#define QPC_RWE QPC_FIELD_LOC(602, 602)
+#define QPC_ATE QPC_FIELD_LOC(603, 603)
+#define QPC_RQIE QPC_FIELD_LOC(604, 604)
+#define QPC_EXT_ATE QPC_FIELD_LOC(605, 605)
+#define QPC_RQ_VLAN_EN QPC_FIELD_LOC(606, 606)
+#define QPC_RQ_RTY_TX_ERR QPC_FIELD_LOC(607, 607)
+#define QPC_RX_CQN QPC_FIELD_LOC(631, 608)
+#define QPC_XRC_QP_TYPE QPC_FIELD_LOC(632, 632)
+#define QPC_RSV3 QPC_FIELD_LOC(634, 633)
+#define QPC_MIN_RNR_TIME QPC_FIELD_LOC(639, 635)
+#define QPC_RQ_PRODUCER_IDX QPC_FIELD_LOC(655, 640)
+#define QPC_RQ_CONSUMER_IDX QPC_FIELD_LOC(671, 656)
+#define QPC_RQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(703, 672)
+#define QPC_RQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(723, 704)
+#define QPC_SRQ_INFO QPC_FIELD_LOC(735, 724)
+#define QPC_RX_REQ_MSN QPC_FIELD_LOC(759, 736)
+#define QPC_REDUCE_CODE QPC_FIELD_LOC(766, 760)
+#define QPC_RX_XRC_PKT_CQE_FLG QPC_FIELD_LOC(767, 767)
+#define QPC_RQ_NXT_BLK_ADDR_L QPC_FIELD_LOC(799, 768)
+#define QPC_RQ_NXT_BLK_ADDR_H QPC_FIELD_LOC(819, 800)
+#define QPC_REDUCE_EN QPC_FIELD_LOC(820, 820)
+#define QPC_FLUSH_EN QPC_FIELD_LOC(821, 821)
+#define QPC_AW_EN QPC_FIELD_LOC(822, 822)
+#define QPC_WN_EN QPC_FIELD_LOC(823, 823)
+#define QPC_RQ_CUR_WQE_SGE_NUM QPC_FIELD_LOC(831, 824)
+#define QPC_INV_CREDIT QPC_FIELD_LOC(832, 832)
+#define QPC_LAST_WRITE_TYPE QPC_FIELD_LOC(834, 833)
+#define QPC_RX_REQ_PSN_ERR QPC_FIELD_LOC(835, 835)
+#define QPC_RX_REQ_LAST_OPTYPE QPC_FIELD_LOC(838, 836)
+#define QPC_RX_REQ_RNR QPC_FIELD_LOC(839, 839)
+#define QPC_RX_REQ_EPSN QPC_FIELD_LOC(863, 840)
+#define QPC_RQ_RNR_TIMER QPC_FIELD_LOC(895, 864)
+#define QPC_RX_MSG_LEN QPC_FIELD_LOC(927, 896)
+#define QPC_RX_RKEY_PKT_INFO QPC_FIELD_LOC(959, 928)
+#define QPC_RX_VA QPC_FIELD_LOC(1023, 960)
+#define QPC_TRRL_HEAD_MAX QPC_FIELD_LOC(1031, 1024)
+#define QPC_TRRL_TAIL_MAX QPC_FIELD_LOC(1039, 1032)
+#define QPC_TRRL_BA_L QPC_FIELD_LOC(1055, 1040)
+#define QPC_TRRL_BA_M QPC_FIELD_LOC(1087, 1056)
+#define QPC_TRRL_BA_H QPC_FIELD_LOC(1099, 1088)
+#define QPC_RR_MAX QPC_FIELD_LOC(1102, 1100)
+#define QPC_RQ_RTY_WAIT_DO QPC_FIELD_LOC(1103, 1103)
+#define QPC_RAQ_TRRL_HEAD QPC_FIELD_LOC(1111, 1104)
+#define QPC_RAQ_TRRL_TAIL QPC_FIELD_LOC(1119, 1112)
+#define QPC_RAQ_RTY_INI_PSN QPC_FIELD_LOC(1143, 1120)
+#define QPC_CIRE_SLV_RQ_EN QPC_FIELD_LOC(1144, 1144)
+#define QPC_RAQ_CREDIT QPC_FIELD_LOC(1149, 1145)
+#define QPC_RQ_DB_IN_EXT QPC_FIELD_LOC(1150, 1150)
+#define QPC_RESP_RTY_FLG QPC_FIELD_LOC(1151, 1151)
+#define QPC_RAQ_MSN QPC_FIELD_LOC(1175, 1152)
+#define QPC_RAQ_SYNDROME QPC_FIELD_LOC(1183, 1176)
+#define QPC_RAQ_PSN QPC_FIELD_LOC(1207, 1184)
+#define QPC_RAQ_TRRL_RTY_HEAD QPC_FIELD_LOC(1215, 1208)
+#define QPC_RAQ_USE_PKTN QPC_FIELD_LOC(1239, 1216)
+#define QPC_RQ_SCC_TOKEN QPC_FIELD_LOC(1245, 1240)
+#define QPC_RVD10 QPC_FIELD_LOC(1247, 1246)
+#define QPC_SQ_PRODUCER_IDX QPC_FIELD_LOC(1263, 1248)
+#define QPC_SQ_CONSUMER_IDX QPC_FIELD_LOC(1279, 1264)
+#define QPC_SQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(1311, 1280)
+#define QPC_SQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(1331, 1312)
+#define QPC_MSG_RTY_LP_FLG QPC_FIELD_LOC(1332, 1332)
+#define QPC_SQ_INVLD_FLG QPC_FIELD_LOC(1333, 1333)
+#define QPC_LP_SGEN_INI QPC_FIELD_LOC(1335, 1334)
+#define QPC_SQ_VLAN_EN QPC_FIELD_LOC(1336, 1336)
+#define QPC_POLL_DB_WAIT_DO QPC_FIELD_LOC(1337, 1337)
+#define QPC_SCC_TOKEN_FORBID_SQ_DEQ QPC_FIELD_LOC(1338, 1338)
+#define QPC_WAIT_ACK_TIMEOUT QPC_FIELD_LOC(1339, 1339)
+#define QPC_IRRL_IDX_LSB QPC_FIELD_LOC(1343, 1340)
+#define QPC_ACK_REQ_FREQ QPC_FIELD_LOC(1349, 1344)
+#define QPC_MSG_RNR_FLG QPC_FIELD_LOC(1350, 1350)
+#define QPC_FRE QPC_FIELD_LOC(1351, 1351)
+#define QPC_SQ_CUR_PSN QPC_FIELD_LOC(1375, 1352)
+#define QPC_MSG_USE_PKTN QPC_FIELD_LOC(1399, 1376)
+#define QPC_IRRL_HEAD_PRE QPC_FIELD_LOC(1407, 1400)
+#define QPC_SQ_CUR_SGE_BLK_ADDR_L QPC_FIELD_LOC(1439, 1408)
+#define QPC_SQ_CUR_SGE_BLK_ADDR_H QPC_FIELD_LOC(1459, 1440)
+#define QPC_IRRL_IDX_MSB QPC_FIELD_LOC(1471, 1460)
+#define QPC_CUR_SGE_OFFSET QPC_FIELD_LOC(1503, 1472)
+#define QPC_CUR_SGE_IDX QPC_FIELD_LOC(1527, 1504)
+#define QPC_EXT_SGE_NUM_LEFT QPC_FIELD_LOC(1535, 1528)
+#define QPC_OWNER_MODE QPC_FIELD_LOC(1536, 1536)
+#define QPC_CIRE_SLV_SQ_EN QPC_FIELD_LOC(1537, 1537)
+#define QPC_CIRE_DOING QPC_FIELD_LOC(1538, 1538)
+#define QPC_CIRE_RESULT QPC_FIELD_LOC(1539, 1539)
+#define QPC_OWNER_DB_WAIT_DO QPC_FIELD_LOC(1540, 1540)
+#define QPC_SQ_WQE_INVLD QPC_FIELD_LOC(1541, 1541)
+#define QPC_DCA_MODE QPC_FIELD_LOC(1542, 1542)
+#define QPC_RTY_OWNER_NOCHK QPC_FIELD_LOC(1543, 1543)
+#define QPC_V2_IRRL_HEAD QPC_FIELD_LOC(1543, 1536)
+#define QPC_SQ_MAX_PSN QPC_FIELD_LOC(1567, 1544)
+#define QPC_SQ_MAX_IDX QPC_FIELD_LOC(1583, 1568)
+#define QPC_LCL_OPERATED_CNT QPC_FIELD_LOC(1599, 1584)
+#define QPC_IRRL_BA_L QPC_FIELD_LOC(1631, 1600)
+#define QPC_IRRL_BA_H QPC_FIELD_LOC(1657, 1632)
+#define QPC_PKT_RNR_FLG QPC_FIELD_LOC(1658, 1658)
+#define QPC_PKT_RTY_FLG QPC_FIELD_LOC(1659, 1659)
+#define QPC_RMT_E2E QPC_FIELD_LOC(1660, 1660)
+#define QPC_SR_MAX QPC_FIELD_LOC(1663, 1661)
+#define QPC_LSN QPC_FIELD_LOC(1687, 1664)
+#define QPC_RETRY_NUM_INIT QPC_FIELD_LOC(1690, 1688)
+#define QPC_CHECK_FLG QPC_FIELD_LOC(1692, 1691)
+#define QPC_RETRY_CNT QPC_FIELD_LOC(1695, 1693)
+#define QPC_SQ_TIMER QPC_FIELD_LOC(1727, 1696)
+#define QPC_RETRY_MSG_MSN QPC_FIELD_LOC(1743, 1728)
+#define QPC_RETRY_MSG_PSN_L QPC_FIELD_LOC(1759, 1744)
+#define QPC_RETRY_MSG_PSN_H QPC_FIELD_LOC(1767, 1760)
+#define QPC_RETRY_MSG_FPKT_PSN QPC_FIELD_LOC(1791, 1768)
+#define QPC_RX_SQ_CUR_BLK_ADDR_L QPC_FIELD_LOC(1823, 1792)
+#define QPC_RX_SQ_CUR_BLK_ADDR_H QPC_FIELD_LOC(1843, 1824)
+#define QPC_IRRL_SGE_IDX QPC_FIELD_LOC(1851, 1844)
+#define QPC_LSAN_EN QPC_FIELD_LOC(1852, 1852)
+#define QPC_SO_LP_VLD QPC_FIELD_LOC(1853, 1853)
+#define QPC_FENCE_LP_VLD QPC_FIELD_LOC(1854, 1854)
+#define QPC_IRRL_LP_VLD QPC_FIELD_LOC(1855, 1855)
+#define QPC_IRRL_CUR_SGE_OFFSET QPC_FIELD_LOC(1887, 1856)
+#define QPC_IRRL_TAIL_REAL QPC_FIELD_LOC(1895, 1888)
+#define QPC_IRRL_TAIL_RD QPC_FIELD_LOC(1903, 1896)
+#define QPC_RX_ACK_MSN QPC_FIELD_LOC(1919, 1904)
+#define QPC_RX_ACK_EPSN QPC_FIELD_LOC(1943, 1920)
+#define QPC_RNR_NUM_INIT QPC_FIELD_LOC(1946, 1944)
+#define QPC_RNR_CNT QPC_FIELD_LOC(1949, 1947)
+#define QPC_LCL_OP_FLG QPC_FIELD_LOC(1950, 1950)
+#define QPC_IRRL_RD_FLG QPC_FIELD_LOC(1951, 1951)
+#define QPC_IRRL_PSN QPC_FIELD_LOC(1975, 1952)
+#define QPC_ACK_PSN_ERR QPC_FIELD_LOC(1976, 1976)
+#define QPC_ACK_LAST_OPTYPE QPC_FIELD_LOC(1978, 1977)
+#define QPC_IRRL_PSN_VLD QPC_FIELD_LOC(1979, 1979)
+#define QPC_RNR_RETRY_FLAG QPC_FIELD_LOC(1980, 1980)
+#define QPC_SQ_RTY_TX_ERR QPC_FIELD_LOC(1981, 1981)
+#define QPC_LAST_IND QPC_FIELD_LOC(1982, 1982)
+#define QPC_CQ_ERR_IND QPC_FIELD_LOC(1983, 1983)
+#define QPC_TX_CQN QPC_FIELD_LOC(2007, 1984)
+#define QPC_SIG_TYPE QPC_FIELD_LOC(2008, 2008)
+#define QPC_ERR_TYPE QPC_FIELD_LOC(2015, 2009)
+#define QPC_RQ_CQE_IDX QPC_FIELD_LOC(2031, 2016)
+#define QPC_SQ_FLUSH_IDX QPC_FIELD_LOC(2047, 2032)
+
+#define RETRY_MSG_PSN_SHIFT 16
#define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
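The flat QPC table above can be sanity-checked against the per-dword masks it retires: each FIELD_LOC carries absolute bit positions within the 2048-bit context, so the owning dword is low / 32 and the in-word mask is GENMASK(high % 32, low % 32). A worked example for QPC_SL, which must land exactly on the old V2_QPC_BYTE_28_SL_M/_S pair (illustrative self-checks, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>

/* QPC_SL is FIELD_LOC(215, 212): dword 212 / 32 = 6, the word the old
 * layout named byte_28_at_fl, holding bits 20..23 of that word.
 */
static_assert(212 / 32 == 6);
static_assert(GENMASK(215 % 32, 212 % 32) == GENMASK(23, 20)); /* == V2_QPC_BYTE_28_SL_M */
static_assert(212 % 32 == 20);                                 /* == V2_QPC_BYTE_28_SL_S */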
@@ -963,6 +700,7 @@ struct hns_roce_v2_qp_context {
#define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
#define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
#define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
+#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
#define V2_QP_RWE_S 1 /* rdma write enable */
@@ -984,56 +722,31 @@ struct hns_roce_v2_cqe {
__le32 rsv[8];
};
-#define V2_CQE_BYTE_4_OPCODE_S 0
-#define V2_CQE_BYTE_4_OPCODE_M GENMASK(4, 0)
-
-#define V2_CQE_BYTE_4_RQ_INLINE_S 5
-
-#define V2_CQE_BYTE_4_S_R_S 6
-
-#define V2_CQE_BYTE_4_OWNER_S 7
-
-#define V2_CQE_BYTE_4_STATUS_S 8
-#define V2_CQE_BYTE_4_STATUS_M GENMASK(15, 8)
-
-#define V2_CQE_BYTE_4_WQE_INDX_S 16
-#define V2_CQE_BYTE_4_WQE_INDX_M GENMASK(31, 16)
-
-#define V2_CQE_BYTE_12_XRC_SRQN_S 0
-#define V2_CQE_BYTE_12_XRC_SRQN_M GENMASK(23, 0)
-
-#define V2_CQE_BYTE_16_LCL_QPN_S 0
-#define V2_CQE_BYTE_16_LCL_QPN_M GENMASK(23, 0)
-
-#define V2_CQE_BYTE_16_SUB_STATUS_S 24
-#define V2_CQE_BYTE_16_SUB_STATUS_M GENMASK(31, 24)
-
-#define V2_CQE_BYTE_28_SMAC_4_S 0
-#define V2_CQE_BYTE_28_SMAC_4_M GENMASK(7, 0)
-
-#define V2_CQE_BYTE_28_SMAC_5_S 8
-#define V2_CQE_BYTE_28_SMAC_5_M GENMASK(15, 8)
-
-#define V2_CQE_BYTE_28_PORT_TYPE_S 16
-#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
-
-#define V2_CQE_BYTE_28_VID_S 18
-#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
-
-#define V2_CQE_BYTE_28_VID_VLD_S 30
-
-#define V2_CQE_BYTE_32_RMT_QPN_S 0
-#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
-
-#define V2_CQE_BYTE_32_SL_S 24
-#define V2_CQE_BYTE_32_SL_M GENMASK(26, 24)
-
-#define V2_CQE_BYTE_32_PORTN_S 27
-#define V2_CQE_BYTE_32_PORTN_M GENMASK(29, 27)
-
-#define V2_CQE_BYTE_32_GRH_S 30
-
-#define V2_CQE_BYTE_32_LPK_S 31
+#define CQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cqe, h, l)
+
+#define CQE_OPCODE CQE_FIELD_LOC(4, 0)
+#define CQE_RQ_INLINE CQE_FIELD_LOC(5, 5)
+#define CQE_S_R CQE_FIELD_LOC(6, 6)
+#define CQE_OWNER CQE_FIELD_LOC(7, 7)
+#define CQE_STATUS CQE_FIELD_LOC(15, 8)
+#define CQE_WQE_IDX CQE_FIELD_LOC(31, 16)
+#define CQE_RKEY_IMMTDATA CQE_FIELD_LOC(63, 32)
+#define CQE_XRC_SRQN CQE_FIELD_LOC(87, 64)
+#define CQE_RSV0 CQE_FIELD_LOC(95, 88)
+#define CQE_LCL_QPN CQE_FIELD_LOC(119, 96)
+#define CQE_SUB_STATUS CQE_FIELD_LOC(127, 120)
+#define CQE_BYTE_CNT CQE_FIELD_LOC(159, 128)
+#define CQE_SMAC CQE_FIELD_LOC(207, 160)
+#define CQE_PORT_TYPE CQE_FIELD_LOC(209, 208)
+#define CQE_VID CQE_FIELD_LOC(221, 210)
+#define CQE_VID_VLD CQE_FIELD_LOC(222, 222)
+#define CQE_RSV2 CQE_FIELD_LOC(223, 223)
+#define CQE_RMT_QPN CQE_FIELD_LOC(247, 224)
+#define CQE_SL CQE_FIELD_LOC(250, 248)
+#define CQE_PORTN CQE_FIELD_LOC(253, 251)
+#define CQE_GRH CQE_FIELD_LOC(254, 254)
+#define CQE_LPK CQE_FIELD_LOC(255, 255)
+#define CQE_RSV3 CQE_FIELD_LOC(511, 256)
struct hns_roce_v2_mpt_entry {
__le32 byte_4_pd_hop_st;
@@ -1153,28 +866,30 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
-#define V2_DB_TAG_S 0
-#define V2_DB_TAG_M GENMASK(23, 0)
+struct hns_roce_v2_db {
+ __le32 data[2];
+};
-#define V2_DB_CMD_S 24
-#define V2_DB_CMD_M GENMASK(27, 24)
+#define DB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_db, h, l)
-#define V2_DB_FLAG_S 31
+#define DB_TAG DB_FIELD_LOC(23, 0)
+#define DB_CMD DB_FIELD_LOC(27, 24)
+#define DB_FLAG DB_FIELD_LOC(31, 31)
+#define DB_PI DB_FIELD_LOC(47, 32)
+#define DB_SL DB_FIELD_LOC(50, 48)
+#define DB_CQ_CI DB_FIELD_LOC(55, 32)
+#define DB_CQ_NOTIFY DB_FIELD_LOC(56, 56)
+#define DB_CQ_CMD_SN DB_FIELD_LOC(58, 57)
+#define EQ_DB_TAG DB_FIELD_LOC(7, 0)
+#define EQ_DB_CMD DB_FIELD_LOC(17, 16)
+#define EQ_DB_CI DB_FIELD_LOC(55, 32)
#define V2_DB_PRODUCER_IDX_S 0
#define V2_DB_PRODUCER_IDX_M GENMASK(15, 0)
-#define V2_DB_SL_S 16
-#define V2_DB_SL_M GENMASK(18, 16)
-
#define V2_CQ_DB_CONS_IDX_S 0
#define V2_CQ_DB_CONS_IDX_M GENMASK(23, 0)
-#define V2_CQ_DB_NOTIFY_TYPE_S 24
-
-#define V2_CQ_DB_CMD_SN_S 25
-#define V2_CQ_DB_CMD_SN_M GENMASK(26, 25)
-
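With the per-type doorbell masks removed, the single two-dword hns_roce_v2_db plus the DB_FIELD_LOC table above now serves SQ, CQ and EQ doorbells alike: dword 0 carries the tag and command fields (at EQ-specific positions for the EQ variant), while dword 1 is reinterpreted per consumer (DB_PI/DB_SL for the SQ, DB_CQ_CI/DB_CQ_NOTIFY/DB_CQ_CMD_SN for the CQ, EQ_DB_CI for the EQ), which is why several of the field ranges overlap. A hedged sketch of ringing a CQ doorbell in this style; the command value and the CQ struct members are assumptions, not taken from this diff:

/* Illustrative sketch, not the driver's actual CQ doorbell routine. */
static void example_ring_cq_db(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq, u32 cmd)
{
	struct hns_roce_v2_db cq_db = {};

	hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);          /* which CQ */
	hr_reg_write(&cq_db, DB_CMD, cmd);                 /* doorbell command */
	hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); /* consumer index */
	hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);

	hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
}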
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
__le32 msg_len;
@@ -1272,16 +987,6 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
-#define V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S 10
-
-#define V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S 11
-
-#define V2_RC_FRMR_WQE_BYTE_40_RR_S 12
-
-#define V2_RC_FRMR_WQE_BYTE_40_RW_S 13
-
-#define V2_RC_FRMR_WQE_BYTE_40_LW_S 14
-
#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
@@ -1300,10 +1005,18 @@ struct hns_roce_wqe_frmr_seg {
__le32 byte_40;
};
-#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
-#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
+#define FRMR_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_wqe_frmr_seg, h, l)
-#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
+#define FRMR_PBL_SIZE FRMR_WQE_FIELD_LOC(31, 0)
+#define FRMR_BLOCK_SIZE FRMR_WQE_FIELD_LOC(35, 32)
+#define FRMR_PBL_BUF_PG_SZ FRMR_WQE_FIELD_LOC(39, 36)
+#define FRMR_BLK_MODE FRMR_WQE_FIELD_LOC(40, 40)
+#define FRMR_ZBVA FRMR_WQE_FIELD_LOC(41, 41)
+#define FRMR_BIND_EN FRMR_WQE_FIELD_LOC(42, 42)
+#define FRMR_ATOMIC FRMR_WQE_FIELD_LOC(43, 43)
+#define FRMR_RR FRMR_WQE_FIELD_LOC(44, 44)
+#define FRMR_RW FRMR_WQE_FIELD_LOC(45, 45)
+#define FRMR_LW FRMR_WQE_FIELD_LOC(46, 46)
struct hns_roce_v2_wqe_data_seg {
__le32 len;
@@ -1311,11 +1024,6 @@ struct hns_roce_v2_wqe_data_seg {
__le64 addr;
};
-struct hns_roce_v2_db {
- __le32 byte_4;
- __le32 parameter;
-};
-
struct hns_roce_query_version {
__le16 rocee_vendor_id;
__le16 rocee_hw_version;
@@ -1342,39 +1050,27 @@ struct hns_roce_func_clear {
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20
-struct hns_roce_cfg_llm_a {
- __le32 base_addr_l;
- __le32 base_addr_h;
- __le32 depth_pgsz_init_en;
- __le32 head_ba_l;
- __le32 head_ba_h_nxtptr;
- __le32 head_ptr;
-};
-
-#define CFG_LLM_QUE_DEPTH_S 0
-#define CFG_LLM_QUE_DEPTH_M GENMASK(12, 0)
-
-#define CFG_LLM_QUE_PGSZ_S 16
-#define CFG_LLM_QUE_PGSZ_M GENMASK(19, 16)
-
-#define CFG_LLM_INIT_EN_S 20
-#define CFG_LLM_INIT_EN_M GENMASK(20, 20)
-
-#define CFG_LLM_HEAD_PTR_S 0
-#define CFG_LLM_HEAD_PTR_M GENMASK(11, 0)
-
-struct hns_roce_cfg_llm_b {
- __le32 tail_ba_l;
- __le32 tail_ba_h;
- __le32 tail_ptr;
- __le32 rsv[3];
-};
-
-#define CFG_LLM_TAIL_BA_H_S 0
-#define CFG_LLM_TAIL_BA_H_M GENMASK(19, 0)
-
-#define CFG_LLM_TAIL_PTR_S 0
-#define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
+/* Fields of HNS_ROCE_OPC_EXT_CFG */
+#define EXT_CFG_VF_ID CMQ_REQ_FIELD_LOC(31, 0)
+#define EXT_CFG_QP_PI_IDX CMQ_REQ_FIELD_LOC(45, 32)
+#define EXT_CFG_QP_PI_NUM CMQ_REQ_FIELD_LOC(63, 48)
+#define EXT_CFG_QP_NUM CMQ_REQ_FIELD_LOC(87, 64)
+#define EXT_CFG_QP_IDX CMQ_REQ_FIELD_LOC(119, 96)
+#define EXT_CFG_LLM_IDX CMQ_REQ_FIELD_LOC(139, 128)
+#define EXT_CFG_LLM_NUM CMQ_REQ_FIELD_LOC(156, 144)
+
+#define CFG_LLM_A_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_LLM_A_BA_H CMQ_REQ_FIELD_LOC(63, 32)
+#define CFG_LLM_A_DEPTH CMQ_REQ_FIELD_LOC(76, 64)
+#define CFG_LLM_A_PGSZ CMQ_REQ_FIELD_LOC(83, 80)
+#define CFG_LLM_A_INIT_EN CMQ_REQ_FIELD_LOC(84, 84)
+#define CFG_LLM_A_HEAD_BA_L CMQ_REQ_FIELD_LOC(127, 96)
+#define CFG_LLM_A_HEAD_BA_H CMQ_REQ_FIELD_LOC(147, 128)
+#define CFG_LLM_A_HEAD_NXTPTR CMQ_REQ_FIELD_LOC(159, 148)
+#define CFG_LLM_A_HEAD_PTR CMQ_REQ_FIELD_LOC(171, 160)
+#define CFG_LLM_B_TAIL_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_LLM_B_TAIL_BA_H CMQ_REQ_FIELD_LOC(63, 32)
+#define CFG_LLM_B_TAIL_PTR CMQ_REQ_FIELD_LOC(75, 64)
/* Fields of HNS_ROCE_OPC_CFG_GLOBAL_PARAM */
#define CFG_GLOBAL_PARAM_1US_CYCLES CMQ_REQ_FIELD_LOC(9, 0)
@@ -1642,6 +1338,7 @@ struct hns_roce_congestion_algorithm {
u8 alg_sel;
u8 alg_sub_sel;
u8 dip_vld;
+ u8 wnd_mode_sel;
};
#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
@@ -1731,52 +1428,21 @@ struct hns_roce_v2_cmq_ring {
struct hns_roce_v2_cmq {
struct hns_roce_v2_cmq_ring csq;
- struct hns_roce_v2_cmq_ring crq;
u16 tx_timeout;
};
-enum hns_roce_link_table_type {
- TSQ_LINK_TABLE,
- TPQ_LINK_TABLE,
-};
-
struct hns_roce_link_table {
struct hns_roce_buf_list table;
- struct hns_roce_buf_list *pg_list;
- u32 npages;
- u32 pg_sz;
+ struct hns_roce_buf *buf;
};
-struct hns_roce_link_table_entry {
- u32 blk_ba0;
- u32 blk_ba1_nxt_ptr;
-};
-#define HNS_ROCE_LINK_TABLE_BA1_S 0
-#define HNS_ROCE_LINK_TABLE_BA1_M GENMASK(19, 0)
-
-#define HNS_ROCE_LINK_TABLE_NXT_PTR_S 20
-#define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+#define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12))
+#define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
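HNS_ROCE_EXT_LLM_ENTRY() packs a page-aligned buffer address and a page id into one 64-bit link-list entry: the low 52 bits hold addr >> 12 and the top 12 bits hold the id. That HNS_ROCE_EXT_LLM_MIN_PAGES() sizes the table at four pages per queue plus two is an inference from the expression, not stated elsewhere in the patch. A tiny worked example of the entry encoding:

static u64 example_llm_entry(void)
{
	/* Illustrative only: 4 KB-aligned DMA address 0x12345000, id 3. */
	return HNS_ROCE_EXT_LLM_ENTRY(0x12345000ULL, 3ULL);
	/* == (3ULL << 52) | 0x12345 */
}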
struct hns_roce_v2_priv {
struct hnae3_handle *handle;
struct hns_roce_v2_cmq cmq;
- struct hns_roce_link_table tsq;
- struct hns_roce_link_table tpq;
-};
-
-struct hns_roce_eq_context {
- __le32 byte_4;
- __le32 byte_8;
- __le32 byte_12;
- __le32 eqe_report_timer;
- __le32 eqe_ba0;
- __le32 eqe_ba1;
- __le32 byte_28;
- __le32 byte_32;
- __le32 byte_36;
- __le32 byte_40;
- __le32 byte_44;
- __le32 rsv[5];
+ struct hns_roce_link_table ext_llm;
};
struct hns_roce_dip {
@@ -1840,6 +1506,10 @@ struct hns_roce_dip {
#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+struct hns_roce_eq_context {
+ __le32 data[16];
+};
+
#define EQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_eq_context, h, l)
#define EQC_EQ_ST EQC_FIELD_LOC(1, 0)
@@ -1876,15 +1546,6 @@ struct hns_roce_dip {
#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
-#define V2_EQ_DB_TAG_S 0
-#define V2_EQ_DB_TAG_M GENMASK(7, 0)
-
-#define V2_EQ_DB_CMD_S 16
-#define V2_EQ_DB_CMD_M GENMASK(17, 16)
-
-#define V2_EQ_DB_CONS_IDX_S 0
-#define V2_EQ_DB_CONS_IDX_M GENMASK(23, 0)
-
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 6c6e82b11d8b..078a97193f0e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,34 +748,16 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_table_free;
}
- ret = hns_roce_init_pd_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to init protected domain table.\n");
- goto err_uar_alloc_free;
- }
+ hns_roce_init_pd_table(hr_dev);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
- ret = hns_roce_init_xrcd_table(hr_dev);
- if (ret) {
- dev_err(dev, "failed to init xrcd table, ret = %d.\n",
- ret);
- goto err_pd_table_free;
- }
- }
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_init_xrcd_table(hr_dev);
- ret = hns_roce_init_mr_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to init memory region table.\n");
- goto err_xrcd_table_free;
- }
+ hns_roce_init_mr_table(hr_dev);
hns_roce_init_cq_table(hr_dev);
- ret = hns_roce_init_qp_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to init queue pair table.\n");
- goto err_cq_table_free;
- }
+ hns_roce_init_qp_table(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ret = hns_roce_init_srq_table(hr_dev);
@@ -790,19 +772,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
err_qp_table_free:
hns_roce_cleanup_qp_table(hr_dev);
-
-err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
- hns_roce_cleanup_mr_table(hr_dev);
+ ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
-err_xrcd_table_free:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
- hns_roce_cleanup_xrcd_table(hr_dev);
-
-err_pd_table_free:
- hns_roce_cleanup_pd_table(hr_dev);
+ ida_destroy(&hr_dev->xrcd_ida.ida);
-err_uar_alloc_free:
+ ida_destroy(&hr_dev->pd_ida.ida);
hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
err_uar_table_free:
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 79b3c3023fe7..006c84bb3f9f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,9 +38,9 @@
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
-static u32 hw_index_to_key(unsigned long ind)
+static u32 hw_index_to_key(int ind)
{
- return (u32)(ind >> 24) | (ind << 8);
+ return ((u32)ind >> 24) | ((u32)ind << 8);
}
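hw_index_to_key() is a 32-bit rotate-left by 8: the top byte of the MPT index becomes the low byte of the key, and key_to_hw_index() rotates back. A quick illustrative round trip, assuming access to both helpers:

#include <linux/bug.h>

static void example_key_roundtrip(void)
{
	u32 ind = 0x00000012;
	u32 key = hw_index_to_key(ind);       /* == 0x00001200 */

	WARN_ON(key_to_hw_index(key) != ind); /* rotate-right by 8 undoes it */
}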
unsigned long key_to_hw_index(u32 key)
@@ -68,22 +68,23 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
+ struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned long obj = 0;
int err;
+ int id;
/* Allocate a key for mr from mr_table */
- err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
- if (err) {
- ibdev_err(ibdev,
- "failed to alloc bitmap for MR key, ret = %d.\n",
- err);
+ id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
return -ENOMEM;
}
- mr->key = hw_index_to_key(obj); /* MR key */
+ mr->key = hw_index_to_key(id); /* MR key */
- err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
+ (unsigned long)id);
if (err) {
ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
goto err_free_bitmap;
@@ -91,7 +92,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
return 0;
err_free_bitmap:
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
+ ida_free(&mtpt_ida->ida, id);
return err;
}
@@ -100,7 +101,7 @@ static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
unsigned long obj = key_to_hw_index(mr->key);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
+ ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
@@ -122,7 +123,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
buf_attr.mtt_only = is_fast;
err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
- hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
udata, start);
if (err)
ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
@@ -196,23 +197,13 @@ err_page:
return ret;
}
-int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- int ret;
-
- ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
- hr_dev->caps.num_mtpts,
- hr_dev->caps.num_mtpts - 1,
- hr_dev->caps.reserved_mrws, 0);
- return ret;
-}
-
-void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
- hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
+ ida_init(&mtpt_ida->ida);
+ mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
+ mtpt_ida->min = hr_dev->caps.reserved_mrws;
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
@@ -503,8 +494,8 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
key_to_hw_index(mw->rkey));
}
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mw->rkey), BITMAP_NO_RR);
+ ida_free(&hr_dev->mr_table.mtpt_ida.ida,
+ (int)key_to_hw_index(mw->rkey));
}
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
@@ -558,16 +549,21 @@ err_table:
int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mw *mw = to_hr_mw(ibmw);
- unsigned long index = 0;
int ret;
+ int id;
- /* Allocate a key for mw from bitmap */
- ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret)
- return ret;
+ /* Allocate a key for mw from mr_table */
+ id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
+ return -ENOMEM;
+ }
- mw->rkey = hw_index_to_key(index);
+ mw->rkey = hw_index_to_key(id);
ibmw->rkey = mw->rkey;
mw->pdn = to_hr_pd(ibmw->pd)->pdn;
@@ -737,11 +733,11 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOMEM;
if (mtr->umem)
- npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, 0,
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
mtr->umem, page_shift);
else
- npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, 0,
- mtr->kmem);
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
+ mtr->kmem, page_shift);
if (npage != page_count) {
ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
@@ -753,8 +749,8 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
if (mtr->hem_cfg.is_direct && npage > 1) {
ret = mtr_check_direct_pages(pages, npage, page_shift);
if (ret) {
- ibdev_err(ibdev, "failed to check %s mtr, idx = %d.\n",
- mtr->umem ? "user" : "kernel", ret);
+ ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
+ mtr->umem ? "umtr" : "kmtr", ret, npage);
ret = -ENOBUFS;
goto err_alloc_list;
}
@@ -776,7 +772,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
unsigned int i, mapped_cnt;
- int ret;
+ int ret = 0;
/*
* Only use the first page address as root ba when hopnum is 0, this
@@ -799,7 +795,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
if (r->offset + r->count > page_cnt) {
ret = -EINVAL;
ibdev_err(ibdev,
- "failed to check mtr%u end %u + %u, max %u.\n",
+ "failed to check mtr%u count %u + %u > %u.\n",
i, r->offset, r->count, page_cnt);
return ret;
}
@@ -992,7 +988,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
&buf_page_shift,
udata ? user_addr & ~PAGE_MASK : 0);
if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
- ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %d.\n",
+ ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
buf_page_cnt, buf_page_shift);
return -EINVAL;
}
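The mtpt changes above are one instance of the series-wide conversion from the driver's homegrown hns_roce_bitmap allocator to the kernel IDA: a struct hns_roce_ida pairs an ida with a [reserved, num - 1] range, ida_alloc_range() replaces hns_roce_bitmap_alloc(), and ida_free() replaces hns_roce_bitmap_free() (the old BITMAP_NO_RR round-robin flag has no IDA counterpart and is dropped). The pattern in isolation, as a sketch with hypothetical names:

#include <linux/idr.h>

struct example_ida {
	struct ida ida;
	unsigned int min; /* lowest usable index; below are reserved */
	unsigned int max; /* highest usable index */
};

static void example_ida_setup(struct example_ida *t, u32 num, u32 reserved)
{
	ida_init(&t->ida);
	t->max = num - 1;
	t->min = reserved;
}

static int example_ida_get(struct example_ida *t)
{
	/* returns the allocated id, or a negative errno on failure */
	return ida_alloc_range(&t->ida, t->min, t->max, GFP_KERNEL);
}

static void example_ida_put(struct example_ida *t, int id)
{
	ida_free(&t->ida, id);
}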
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index a5813bf567b2..ea5663630985 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -34,39 +34,31 @@
#include <linux/pci.h>
#include "hns_roce_device.h"
-static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
-}
-
-static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
-{
- hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
-}
-
-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
-{
- return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
- hr_dev->caps.num_pds - 1,
- hr_dev->caps.reserved_pds, 0);
-}
+ struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
-{
- hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
+ ida_init(&pd_ida->ida);
+ pd_ida->max = hr_dev->caps.num_pds - 1;
+ pd_ida->min = hr_dev->caps.reserved_pds;
}
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ib_dev = ibpd->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
struct hns_roce_pd *pd = to_hr_pd(ibpd);
- int ret;
+ int ret = 0;
+ int id;
- ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
- if (ret) {
- ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
- return ret;
+ id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
+ return -ENOMEM;
}
+ pd->pdn = (unsigned long)id;
if (udata) {
struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};
@@ -74,7 +66,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
if (ret) {
- hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+ ida_free(&pd_ida->ida, id);
ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
}
}
@@ -84,7 +76,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
- hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+
+ ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);
+
return 0;
}
@@ -121,8 +116,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx,
- BITMAP_NO_RR);
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx);
}
int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
@@ -140,35 +134,27 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
{
- unsigned long obj;
- int ret;
+ struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
+ int id;
- ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
- if (ret)
- return ret;
-
- *xrcdn = obj;
+ id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
+ GFP_KERNEL);
+ if (id < 0) {
+ ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
+ return -ENOMEM;
+ }
+ *xrcdn = (u32)id;
return 0;
}
-static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
- u32 xrcdn)
-{
- hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
-}
-
-int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
- return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
- hr_dev->caps.num_xrcds,
- hr_dev->caps.num_xrcds - 1,
- hr_dev->caps.reserved_xrcds, 0);
-}
+ struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
-void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
-{
- hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
+ ida_init(&xrcd_ida->ida);
+ xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
+ xrcd_ida->min = hr_dev->caps.reserved_xrcds;
}
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
@@ -181,18 +167,18 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
return -EINVAL;
ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
- if (ret) {
- dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
+ if (ret)
return ret;
- }
return 0;
}
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
- hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device),
- to_hr_xrcd(ib_xrcd)->xrcdn);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+ u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;
+
+ ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 230a909ba9bc..b101b7e578f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,7 +65,7 @@ static void flush_work_handle(struct work_struct *work)
* make sure we signal the QP destroy path that the flush has
* completed, so that it can now safely proceed and destroy the QP
*/
- if (atomic_dec_and_test(&hr_qp->refcount))
+ if (refcount_dec_and_test(&hr_qp->refcount))
complete(&hr_qp->free);
}
@@ -75,10 +75,25 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
flush_work->hr_dev = hr_dev;
INIT_WORK(&flush_work->work, flush_work_handle);
- atomic_inc(&hr_qp->refcount);
+ refcount_inc(&hr_qp->refcount);
queue_work(hr_dev->irq_workq, &flush_work->work);
}
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+{
+ /*
+	 * Hip08 hardware cannot flush the WQEs in SQ/RQ when the QP state
+	 * gets into errored mode, so as a workaround for this hardware
+	 * limitation the driver needs to assist in flushing. But the
+	 * flush operation uses a mailbox to convey the QP state to the
+	 * hardware, and that call can sleep due to the mutex protection
+	 * around the mailbox calls. Hence, use the deferred flush for
+	 * now.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(dev, qp);
+}
+
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct device *dev = hr_dev->dev;
@@ -87,7 +102,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_lock(&hr_dev->qp_table_xa);
qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (qp)
- atomic_inc(&qp->refcount);
+ refcount_inc(&qp->refcount);
xa_unlock(&hr_dev->qp_table_xa);
if (!qp) {
@@ -102,13 +117,13 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
qp->state = IB_QPS_ERR;
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+
+ flush_cqe(hr_dev, qp);
}
qp->event(qp, (enum hns_roce_event)event_type);
- if (atomic_dec_and_test(&qp->refcount))
+ if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
}
@@ -379,7 +394,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
list_del(&hr_qp->rq_node);
xa_lock_irqsave(xa, flags);
- __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
+ __xa_erase(xa, hr_qp->qpn);
xa_unlock_irqrestore(xa, flags);
}
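
The erase above drops the "& (hr_dev->caps.num_qps - 1)" masking, so the xarray is keyed by the full qpn; store, lookup, and erase must all agree on that key or entries leak. A toy pairing under that assumption (the demo_* names are illustrative, not driver code):

#include <linux/xarray.h>
#include <linux/types.h>

static int demo_qp_store(struct xarray *xa, u32 qpn, void *qp)
{
	/* key with the full qpn, matching the erase in the hunk above */
	return xa_err(xa_store(xa, qpn, qp, GFP_KERNEL));
}

static void *demo_qp_erase(struct xarray *xa, u32 qpn)
{
	/* the same un-masked qpn must be used here */
	return xa_erase(xa, qpn);
}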
@@ -648,9 +663,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg) {
- ibdev_err(ibdev,
- "failed to check SQ WR or SGE num, ret = %d.\n",
- -EINVAL);
+ ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
return -EINVAL;
}
@@ -761,7 +774,7 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_inline;
}
ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
- HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+ PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
udata, addr);
if (ret) {
ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
@@ -826,7 +839,7 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
if (udata) {
if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
+ ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr,
&hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@@ -839,7 +852,7 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
+ ret = hns_roce_db_map_user(uctx, ucmd->db_addr,
&hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@@ -1076,7 +1089,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->event = hns_roce_ib_qp_event;
- atomic_set(&hr_qp->refcount, 1);
+ refcount_set(&hr_qp->refcount, 1);
init_completion(&hr_qp->free);
return 0;
@@ -1099,7 +1112,7 @@ err_buf:
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
{
- if (atomic_dec_and_test(&hr_qp->refcount))
+ if (refcount_dec_and_test(&hr_qp->refcount))
complete(&hr_qp->free);
wait_for_completion(&hr_qp->free);
@@ -1416,7 +1429,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
return cur + nreq >= hr_wq->wqe_cnt;
}
-int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned int reserved_from_bot;
@@ -1439,8 +1452,6 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
HNS_ROCE_QP_BANK_NUM - 1;
hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
}
-
- return 0;
}
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
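
Every atomic_t-to-refcount_t hunk in this file follows one lifetime pattern: set the count to 1 at create, take a reference before handing the QP to an event or flush worker, and let the last put wake the destroyer. refcount_t additionally saturates and WARNs on overflow or increment-from-zero instead of silently wrapping. A condensed sketch with a placeholder struct, not hns_roce_qp itself:

#include <linux/refcount.h>
#include <linux/completion.h>

struct demo_obj {
	refcount_t refcount;
	struct completion free;
};

static void demo_obj_init(struct demo_obj *o)
{
	refcount_set(&o->refcount, 1);	/* creator holds the first reference */
	init_completion(&o->free);
}

static void demo_obj_get(struct demo_obj *o)
{
	refcount_inc(&o->refcount);	/* WARNs instead of wrapping on misuse */
}

static void demo_obj_put(struct demo_obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		complete(&o->free);	/* last user wakes the destroyer */
}

static void demo_obj_destroy(struct demo_obj *o)
{
	demo_obj_put(o);		/* drop the creator's reference */
	wait_for_completion(&o->free);	/* wait out in-flight event handlers */
}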
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 546d182c577a..6f2992f443fa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -17,7 +17,7 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
xa_lock(&srq_table->xa);
srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
if (srq)
- atomic_inc(&srq->refcount);
+ refcount_inc(&srq->refcount);
xa_unlock(&srq_table->xa);
if (!srq) {
@@ -27,7 +27,7 @@ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
srq->event(srq, event_type);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
@@ -132,7 +132,7 @@ err_xa:
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
err_out:
- hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
+ hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
return ret;
}
@@ -149,12 +149,12 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
xa_erase(&srq_table->xa, srq->srqn);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
- hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
+ hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
}
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
@@ -167,14 +167,14 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
- buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
srq->idx_que.entry_shift);
buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
buf_attr.region_count = 1;
ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
- hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
udata, addr);
if (ret) {
ibdev_err(ibdev,
@@ -222,15 +222,15 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
HNS_ROCE_SGE_SIZE *
srq->max_gs)));
- buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
srq->wqe_shift);
buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
buf_attr.region_count = 1;
ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
- hr_dev->caps.srqwqe_ba_pg_sz +
- HNS_HW_PAGE_SHIFT, udata, addr);
+ hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
+ udata, addr);
if (ret)
ibdev_err(ibdev,
"failed to alloc SRQ buf mtr, ret = %d.\n", ret);
@@ -417,7 +417,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
srq->event = hns_roce_ib_srq_event;
- atomic_set(&srq->refcount, 1);
+ refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return 0;
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
deleted file mode 100644
index 7476f3b14c39..000000000000
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_I40IW
- tristate "Intel(R) Ethernet X722 iWARP Driver"
- depends on INET && I40E
- depends on IPV6 || !IPV6
- depends on PCI
- select GENERIC_ALLOCATOR
- help
- Intel(R) Ethernet X722 iWARP Driver
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
deleted file mode 100644
index 34da9eba8a7c..000000000000
--- a/drivers/infiniband/hw/i40iw/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
-
-i40iw-objs :=\
- i40iw_cm.o i40iw_ctrl.o \
- i40iw_hmc.o i40iw_hw.o i40iw_main.o \
- i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
- i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
deleted file mode 100644
index be4094ac4fac..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ /dev/null
@@ -1,602 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_IW_H
-#define I40IW_IW_H
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/crc32c.h>
-#include <linux/net/intel/i40e_client.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-#include <rdma/rdma_cm.h>
-#include <rdma/iw_cm.h>
-#include <crypto/hash.h>
-
-#include "i40iw_status.h"
-#include "i40iw_osdep.h"
-#include "i40iw_d.h"
-#include "i40iw_hmc.h"
-
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include <rdma/i40iw-abi.h>
-#include "i40iw_pble.h"
-#include "i40iw_verbs.h"
-#include "i40iw_cm.h"
-#include "i40iw_user.h"
-#include "i40iw_puda.h"
-
-#define I40IW_FW_VER_DEFAULT 2
-#define I40IW_HW_VERSION 2
-
-#define I40IW_ARP_ADD 1
-#define I40IW_ARP_DELETE 2
-#define I40IW_ARP_RESOLVE 3
-
-#define I40IW_MACIP_ADD 1
-#define I40IW_MACIP_DELETE 2
-
-#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
-#define IW_CEQ_SIZE 2048
-#define IW_AEQ_SIZE 2048
-
-#define RX_BUF_SIZE (1536 + 8)
-#define IW_REG0_SIZE (4 * 1024)
-#define IW_TX_TIMEOUT (6 * HZ)
-#define IW_FIRST_QPN 1
-#define IW_SW_CONTEXT_ALIGN 1024
-
-#define MAX_DPC_ITERATIONS 128
-
-#define I40IW_EVENT_TIMEOUT 100000
-#define I40IW_VCHNL_EVENT_TIMEOUT 100000
-
-#define I40IW_NO_VLAN 0xffff
-#define I40IW_NO_QSET 0xffff
-
-/* access to mcast filter list */
-#define IW_ADD_MCAST false
-#define IW_DEL_MCAST true
-
-#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
-#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
-#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
-#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
-#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
-#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
-#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
-#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
-#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
-#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
-#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
-
-#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define IW_CFG_FPM_QP_COUNT 32768
-#define I40IW_MAX_PAGES_PER_FMR 512
-#define I40IW_MIN_PAGES_PER_FMR 1
-#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
-#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
-#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
-
-struct i40iw_cqp_compl_info {
- u32 op_ret_val;
- u16 maj_err_code;
- u16 min_err_code;
- bool error;
- u8 op_code;
-};
-
-#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
-
-#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
-
-#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
-
-struct i40iw_cqp_request {
- struct cqp_commands_info info;
- wait_queue_head_t waitq;
- struct list_head list;
- atomic_t refcount;
- void (*callback_fcn)(struct i40iw_cqp_request*, u32);
- void *param;
- struct i40iw_cqp_compl_info compl_info;
- bool waiting;
- bool request_done;
- bool dynamic;
-};
-
-struct i40iw_cqp {
- struct i40iw_sc_cqp sc_cqp;
- spinlock_t req_lock; /*cqp request list */
- wait_queue_head_t waitq;
- struct i40iw_dma_mem sq;
- struct i40iw_dma_mem host_ctx;
- u64 *scratch_array;
- struct i40iw_cqp_request *cqp_requests;
- struct list_head cqp_avail_reqs;
- struct list_head cqp_pending_reqs;
-};
-
-struct i40iw_device;
-
-struct i40iw_ccq {
- struct i40iw_sc_cq sc_cq;
- spinlock_t lock; /* ccq control */
- wait_queue_head_t waitq;
- struct i40iw_dma_mem mem_cq;
- struct i40iw_dma_mem shadow_area;
-};
-
-struct i40iw_ceq {
- struct i40iw_sc_ceq sc_ceq;
- struct i40iw_dma_mem mem;
- u32 irq;
- u32 msix_idx;
- struct i40iw_device *iwdev;
- struct tasklet_struct dpc_tasklet;
-};
-
-struct i40iw_aeq {
- struct i40iw_sc_aeq sc_aeq;
- struct i40iw_dma_mem mem;
-};
-
-struct i40iw_arp_entry {
- u32 ip_addr[4];
- u8 mac_addr[ETH_ALEN];
-};
-
-enum init_completion_state {
- INVALID_STATE = 0,
- INITIAL_STATE,
- CQP_CREATED,
- HMC_OBJS_CREATED,
- PBLE_CHUNK_MEM,
- CCQ_CREATED,
- AEQ_CREATED,
- CEQ_CREATED,
- ILQ_CREATED,
- IEQ_CREATED,
- IP_ADDR_REGISTERED,
- RDMA_DEV_REGISTERED
-};
-
-struct i40iw_msix_vector {
- u32 idx;
- u32 irq;
- u32 cpu_affinity;
- u32 ceq_id;
- cpumask_t mask;
-};
-
-struct l2params_work {
- struct work_struct work;
- struct i40iw_device *iwdev;
- struct i40iw_l2params l2params;
-};
-
-#define I40IW_MSIX_TABLE_SIZE 65
-
-struct virtchnl_work {
- struct work_struct work;
- union {
- struct i40iw_cqp_request *cqp_request;
- struct i40iw_virtchnl_work_info work_info;
- };
-};
-
-struct i40e_qvlist_info;
-
-struct i40iw_device {
- struct i40iw_ib_device *iwibdev;
- struct net_device *netdev;
- wait_queue_head_t vchnl_waitq;
- struct i40iw_sc_dev sc_dev;
- struct i40iw_sc_vsi vsi;
- struct i40iw_handler *hdl;
- struct i40e_info *ldev;
- struct i40e_client *client;
- struct i40iw_hw hw;
- struct i40iw_cm_core cm_core;
- u8 *mem_resources;
- unsigned long *allocated_qps;
- unsigned long *allocated_cqs;
- unsigned long *allocated_mrs;
- unsigned long *allocated_pds;
- unsigned long *allocated_arps;
- struct i40iw_qp **qp_table;
- bool msix_shared;
- u32 msix_count;
- struct i40iw_msix_vector *iw_msixtbl;
- struct i40e_qvlist_info *iw_qvlist;
-
- struct i40iw_hmc_pble_rsrc *pble_rsrc;
- struct i40iw_arp_entry *arp_table;
- struct i40iw_cqp cqp;
- struct i40iw_ccq ccq;
- u32 ceqs_count;
- struct i40iw_ceq *ceqlist;
- struct i40iw_aeq aeq;
- u32 arp_table_size;
- u32 next_arp_index;
- spinlock_t resource_lock; /* hw resource access */
- spinlock_t qptable_lock;
- u32 vendor_id;
- u32 vendor_part_id;
- u32 of_device_registered;
-
- u32 device_cap_flags;
- unsigned long db_start;
- u8 resource_profile;
- u8 max_rdma_vfs;
- u8 max_enabled_vfs;
- u8 max_sge;
- u8 iw_status;
- u8 send_term_ok;
-
- /* x710 specific */
- struct mutex pbl_mutex;
- struct tasklet_struct dpc_tasklet;
- struct workqueue_struct *virtchnl_wq;
- struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
- struct i40iw_dma_mem obj_mem;
- struct i40iw_dma_mem obj_next;
- u8 *hmc_info_mem;
- u32 sd_type;
- struct workqueue_struct *param_wq;
- atomic_t params_busy;
- enum init_completion_state init_state;
- u16 mac_ip_table_idx;
- atomic_t vchnl_msgs;
- u32 max_mr;
- u32 max_qp;
- u32 max_cq;
- u32 max_pd;
- u32 next_qp;
- u32 next_cq;
- u32 next_pd;
- u32 max_mr_size;
- u32 max_qp_wr;
- u32 max_cqe;
- u32 mr_stagmask;
- u32 mpa_version;
- bool dcb;
- bool closing;
- bool reset;
- u32 used_pds;
- u32 used_cqs;
- u32 used_mrs;
- u32 used_qps;
- wait_queue_head_t close_wq;
- atomic64_t use_count;
-};
-
-struct i40iw_ib_device {
- struct ib_device ibdev;
- struct i40iw_device *iwdev;
-};
-
-struct i40iw_handler {
- struct list_head list;
- struct i40e_client *client;
- struct i40iw_device device;
- struct i40e_info ldev;
-};
-
-/**
- * i40iw_fw_major_ver - get firmware major version
- * @dev: iwarp device
- **/
-static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev)
-{
- return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
- I40IW_FW_VER_MAJOR);
-}
-
-/**
- * i40iw_fw_minor_ver - get firmware minor version
- * @dev: iwarp device
- **/
-static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev)
-{
- return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
- I40IW_FW_VER_MINOR);
-}
-
-/**
- * to_iwdev - get device
- * @ibdev: ib device
- **/
-static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
-}
-
-/**
- * to_ucontext - get user context
- * @ibucontext: ib user context
- **/
-static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
-{
- return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
-}
-
-/**
- * to_iwpd - get protection domain
- * @ibpd: ib pd
- **/
-static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct i40iw_pd, ibpd);
-}
-
-/**
- * to_iwmr - get device memory region
- * @ibmr: ib memory region
- **/
-static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct i40iw_mr, ibmr);
-}
-
-/**
- * to_iwmw - get device memory window
- * @ibmw: ib memory window
- **/
-static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct i40iw_mr, ibmw);
-}
-
-/**
- * to_iwcq - get completion queue
- * @ibcq: ib cq
- **/
-static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct i40iw_cq, ibcq);
-}
-
-/**
- * to_iwqp - get device qp
- * @ibqp: ib qp
- **/
-static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct i40iw_qp, ibqp);
-}
-
-/* i40iw.c */
-void i40iw_qp_add_ref(struct ib_qp *ibqp);
-void i40iw_qp_rem_ref(struct ib_qp *ibqp);
-struct ib_qp *i40iw_get_qp(struct ib_device *, int);
-
-void i40iw_flush_wqes(struct i40iw_device *iwdev,
- struct i40iw_qp *qp);
-
-void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
- unsigned char *mac_addr,
- u32 *ip_addr,
- bool ipv4,
- u32 action);
-
-int i40iw_manage_apbvt(struct i40iw_device *iwdev,
- u16 accel_local_port,
- bool add_port);
-
-struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
-void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
-void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
-
-/**
- * i40iw_alloc_resource - allocate a resource
- * @iwdev: device pointer
- * @resource_array: resource bit array
- * @max_resources: maximum resource number
- * @req_resource_num: allocated resource number
- * @next: next free id
- **/
-static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
- unsigned long *resource_array,
- u32 max_resources,
- u32 *req_resource_num,
- u32 *next)
-{
- u32 resource_num;
- unsigned long flags;
-
- spin_lock_irqsave(&iwdev->resource_lock, flags);
- resource_num = find_next_zero_bit(resource_array, max_resources, *next);
- if (resource_num >= max_resources) {
- resource_num = find_first_zero_bit(resource_array, max_resources);
- if (resource_num >= max_resources) {
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
- return -EOVERFLOW;
- }
- }
- set_bit(resource_num, resource_array);
- *next = resource_num + 1;
- if (*next == max_resources)
- *next = 0;
- *req_resource_num = resource_num;
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
-
- return 0;
-}
-
-/**
- * i40iw_is_resource_allocated - determine if resource is allocated
- * @iwdev: device pointer
- * @resource_array: resource array for the resource_num
- * @resource_num: resource number to check
- **/
-static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
- unsigned long *resource_array,
- u32 resource_num)
-{
- bool bit_is_set;
- unsigned long flags;
-
- spin_lock_irqsave(&iwdev->resource_lock, flags);
-
- bit_is_set = test_bit(resource_num, resource_array);
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
-
- return bit_is_set;
-}
-
-/**
- * i40iw_free_resource - free a resource
- * @iwdev: device pointer
- * @resource_array: resource array for the resource_num
- * @resource_num: resource number to free
- **/
-static inline void i40iw_free_resource(struct i40iw_device *iwdev,
- unsigned long *resource_array,
- u32 resource_num)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&iwdev->resource_lock, flags);
- clear_bit(resource_num, resource_array);
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
-}
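-
-/*
- * A hypothetical caller tying the two helpers above together;
- * example_alloc_pd_id() is illustrative and not part of this header,
- * but the iwdev fields are the ones declared earlier in this file.
- */
-static inline int example_alloc_pd_id(struct i40iw_device *iwdev, u32 *pd_id)
-{
-	/* scans allocated_pds starting at next_pd, wrapping to bit 0 once */
-	return i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
-				    iwdev->max_pd, pd_id, &iwdev->next_pd);
-}
-
-/*
- * Error paths then return the id to the pool with
- * i40iw_free_resource(iwdev, iwdev->allocated_pds, *pd_id).
- */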
-
-struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
-
-/**
- * i40iw_initialize_hw_resources - initialize hw resource tracking arrays
- */
-u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
-
-int i40iw_register_rdma_device(struct i40iw_device *iwdev);
-void i40iw_port_ibevent(struct i40iw_device *iwdev);
-void i40iw_cm_disconn(struct i40iw_qp *iwqp);
-void i40iw_cm_disconn_worker(void *);
-int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
- struct sk_buff *);
-
-enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
- struct i40iw_cqp_request *cqp_request);
-enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
- u8 *mac_addr, u8 *mac_index);
-int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
-void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
-
-void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
-void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
-void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
-void i40iw_rem_devusecount(struct i40iw_device *iwdev);
-void i40iw_add_devusecount(struct i40iw_device *iwdev);
-void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
- struct i40iw_modify_qp_info *info, bool wait);
-
-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
- struct i40iw_sc_qp *qp,
- bool suspend);
-enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
- struct i40iw_cm_info *cminfo,
- enum i40iw_quad_entry_type etype,
- enum i40iw_quad_hash_manage_type mtype,
- void *cmnode,
- bool wait);
-void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
-void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
-void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
-
-enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
- struct i40iw_dma_mem *memptr,
- u32 size, u32 mask);
-
-void i40iw_request_reset(struct i40iw_device *iwdev);
-void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
-int i40iw_setup_cm_core(struct i40iw_device *iwdev);
-void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
-void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
-void i40iw_process_aeq(struct i40iw_device *);
-void i40iw_next_iw_state(struct i40iw_qp *iwqp,
- u8 state, u8 del_hash,
- u8 term, u8 term_len);
-int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
-int i40iw_send_reset(struct i40iw_cm_node *cm_node);
-struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
- u16 rem_port,
- u32 *rem_addr,
- u16 loc_port,
- u32 *loc_addr,
- bool add_refcnt,
- bool accelerated_list);
-
-enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
- struct i40iw_sc_qp *qp,
- struct i40iw_qp_flush_info *info,
- bool wait);
-
-void i40iw_gen_ae(struct i40iw_device *iwdev,
- struct i40iw_sc_qp *qp,
- struct i40iw_gen_ae_info *info,
- bool wait);
-
-void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
-struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
- u64 addr,
- u64 size,
- int acc,
- u64 *iova_start);
-
-int i40iw_inetaddr_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr);
-int i40iw_inet6addr_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr);
-int i40iw_net_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr);
-int i40iw_netdevice_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr);
-
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
deleted file mode 100644
index 2450b7dd51f6..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ /dev/null
@@ -1,4419 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include <linux/atomic.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/if_arp.h>
-#include <linux/if_vlan.h>
-#include <linux/notifier.h>
-#include <linux/net.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/netdevice.h>
-#include <linux/random.h>
-#include <linux/list.h>
-#include <linux/threads.h>
-#include <linux/highmem.h>
-#include <net/arp.h>
-#include <net/ndisc.h>
-#include <net/neighbour.h>
-#include <net/route.h>
-#include <net/addrconf.h>
-#include <net/ip6_route.h>
-#include <net/ip_fib.h>
-#include <net/secure_seq.h>
-#include <net/tcp.h>
-#include <asm/checksum.h>
-
-#include "i40iw.h"
-
-static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
-static void i40iw_cm_post_event(struct i40iw_cm_event *event);
-static void i40iw_disconnect_worker(struct work_struct *work);
-
-/**
- * i40iw_free_sqbuf - put back puda buffer if refcount = 0
- * @vsi: pointer to vsi structure
- * @bufp: puda buffer to free
- */
-void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
-{
- struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
- struct i40iw_puda_rsrc *ilq = vsi->ilq;
-
- if (!atomic_dec_return(&buf->refcount))
- i40iw_puda_ret_bufpool(ilq, buf);
-}
-
-/**
- * i40iw_derive_hw_ird_setting - Calculate IRD
- *
- * @cm_ird: IRD of connection's node
- *
- * The ird from the connection is rounded to a supported HW
- * setting (2,8,32,64) and then encoded for ird_size field of
- * qp_ctx
- */
-static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
-{
- u8 encoded_ird_size;
-
- /* ird_size field is encoded in qp_ctx */
- switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
- case I40IW_HW_IRD_SETTING_64:
- encoded_ird_size = 3;
- break;
- case I40IW_HW_IRD_SETTING_32:
- case I40IW_HW_IRD_SETTING_16:
- encoded_ird_size = 2;
- break;
- case I40IW_HW_IRD_SETTING_8:
- case I40IW_HW_IRD_SETTING_4:
- encoded_ird_size = 1;
- break;
- case I40IW_HW_IRD_SETTING_2:
- default:
- encoded_ird_size = 0;
- break;
- }
- return encoded_ird_size;
-}
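-
-/*
- * Worked examples for the encoder above, assuming the
- * I40IW_HW_IRD_SETTING_* constants equal the powers of two in their
- * names: cm_ird = 0 stays 0 and encodes to 0; cm_ird = 20 rounds up
- * to 32 and encodes to 2; cm_ird = 64 encodes to 3; anything above 64
- * rounds past the largest case and falls through to the default
- * encoding of 0.
- */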
-
-/**
- * i40iw_record_ird_ord - Record IRD/ORD passed in
- * @cm_node: connection's node
- * @conn_ird: connection IRD
- * @conn_ord: connection ORD
- */
-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
- u32 conn_ord)
-{
- if (conn_ird > I40IW_MAX_IRD_SIZE)
- conn_ird = I40IW_MAX_IRD_SIZE;
-
- if (conn_ord > I40IW_MAX_ORD_SIZE)
- conn_ord = I40IW_MAX_ORD_SIZE;
- else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
- conn_ord = 1;
-
- cm_node->ird_size = conn_ird;
- cm_node->ord_size = conn_ord;
-}
-
-/**
- * i40iw_copy_ip_ntohl - change network to host ip
- * @dst: host ip
- * @src: big endian
- */
-void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
-{
- *dst++ = ntohl(*src++);
- *dst++ = ntohl(*src++);
- *dst++ = ntohl(*src++);
- *dst = ntohl(*src);
-}
-
-/**
- * i40iw_copy_ip_htonl - change host addr to network ip
- * @dst: network order ip
- * @src: host order ip
- */
-static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
-{
- *dst++ = htonl(*src++);
- *dst++ = htonl(*src++);
- *dst++ = htonl(*src++);
- *dst = htonl(*src);
-}
-
-/**
- * i40iw_fill_sockaddr4 - get addr info for passive connection
- * @cm_node: connection's node
- * @event: upper layer's cm event
- */
-static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
- struct iw_cm_event *event)
-{
- struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
-
- laddr->sin_family = AF_INET;
- raddr->sin_family = AF_INET;
-
- laddr->sin_port = htons(cm_node->loc_port);
- raddr->sin_port = htons(cm_node->rem_port);
-
- laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
- raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
-}
-
-/**
- * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
- * @cm_node: connection's node
- * @event: upper layer's cm event
- */
-static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
- struct iw_cm_event *event)
-{
- struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
- struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
-
- laddr6->sin6_family = AF_INET6;
- raddr6->sin6_family = AF_INET6;
-
- laddr6->sin6_port = htons(cm_node->loc_port);
- raddr6->sin6_port = htons(cm_node->rem_port);
-
- i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
- cm_node->loc_addr);
- i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
- cm_node->rem_addr);
-}
-
-/**
- * i40iw_get_addr_info - copy the ip/tcp info from a cm node
- * @cm_node: contains ip/tcp info
- * @cm_info: to get a copy of the cm_node ip/tcp info
- */
-static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
- struct i40iw_cm_info *cm_info)
-{
- cm_info->ipv4 = cm_node->ipv4;
- cm_info->vlan_id = cm_node->vlan_id;
- memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
- memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
- cm_info->loc_port = cm_node->loc_port;
- cm_info->rem_port = cm_node->rem_port;
- cm_info->user_pri = cm_node->user_pri;
-}
-
-/**
- * i40iw_get_cmevent_info - for cm event upcall
- * @cm_node: connection's node
- * @cm_id: upper layers cm struct for the event
- * @event: upper layer's cm event
- */
-static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
- struct iw_cm_id *cm_id,
- struct iw_cm_event *event)
-{
- memcpy(&event->local_addr, &cm_id->m_local_addr,
- sizeof(event->local_addr));
- memcpy(&event->remote_addr, &cm_id->m_remote_addr,
- sizeof(event->remote_addr));
- if (cm_node) {
- event->private_data = (void *)cm_node->pdata_buf;
- event->private_data_len = (u8)cm_node->pdata.size;
- event->ird = cm_node->ird_size;
- event->ord = cm_node->ord_size;
- }
-}
-
-/**
- * i40iw_send_cm_event - upcall cm's event handler
- * @cm_node: connection's node
- * @cm_id: upper layer's cm info struct
- * @type: Event type to indicate
- * @status: status for the event type
- */
-static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
- struct iw_cm_id *cm_id,
- enum iw_cm_event_type type,
- int status)
-{
- struct iw_cm_event event;
-
- memset(&event, 0, sizeof(event));
- event.event = type;
- event.status = status;
- switch (type) {
- case IW_CM_EVENT_CONNECT_REQUEST:
- if (cm_node->ipv4)
- i40iw_fill_sockaddr4(cm_node, &event);
- else
- i40iw_fill_sockaddr6(cm_node, &event);
- event.provider_data = (void *)cm_node;
- event.private_data = (void *)cm_node->pdata_buf;
- event.private_data_len = (u8)cm_node->pdata.size;
- event.ird = cm_node->ird_size;
- break;
- case IW_CM_EVENT_CONNECT_REPLY:
- i40iw_get_cmevent_info(cm_node, cm_id, &event);
- break;
- case IW_CM_EVENT_ESTABLISHED:
- event.ird = cm_node->ird_size;
- event.ord = cm_node->ord_size;
- break;
- case IW_CM_EVENT_DISCONNECT:
- break;
- case IW_CM_EVENT_CLOSE:
- break;
- default:
- i40iw_pr_err("event type received type = %d\n", type);
- return -1;
- }
- return cm_id->event_handler(cm_id, &event);
-}
-
-/**
- * i40iw_create_event - create cm event
- * @cm_node: connection's node
- * @type: Event type to generate
- */
-static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
- enum i40iw_cm_event_type type)
-{
- struct i40iw_cm_event *event;
-
- if (!cm_node->cm_id)
- return NULL;
-
- event = kzalloc(sizeof(*event), GFP_ATOMIC);
-
- if (!event)
- return NULL;
-
- event->type = type;
- event->cm_node = cm_node;
- memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
- memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
- event->cm_info.rem_port = cm_node->rem_port;
- event->cm_info.loc_port = cm_node->loc_port;
- event->cm_info.cm_id = cm_node->cm_id;
-
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
- cm_node,
- event,
- type,
- event->cm_info.loc_addr,
- event->cm_info.rem_addr);
-
- i40iw_cm_post_event(event);
- return event;
-}
-
-/**
- * i40iw_free_retrans_entry - free send entry
- * @cm_node: connection's node
- */
-static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_device *iwdev = cm_node->iwdev;
- struct i40iw_timer_entry *send_entry;
-
- send_entry = cm_node->send_entry;
- if (send_entry) {
- cm_node->send_entry = NULL;
- i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
- kfree(send_entry);
- atomic_dec(&cm_node->ref_count);
- }
-}
-
-/**
- * i40iw_cleanup_retrans_entry - free send entry with lock
- * @cm_node: connection's node
- */
-static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- i40iw_free_retrans_entry(cm_node);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-}
-
-/**
- * i40iw_form_cm_frame - get a free packet and build frame
- * @cm_node: connection's node info to use in frame
- * @options: pointer to options info
- * @hdr: pointer to mpa header
- * @pdata: pointer to private data
- * @flags: indicates FIN or ACK
- */
-static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
- struct i40iw_kmem_info *options,
- struct i40iw_kmem_info *hdr,
- struct i40iw_kmem_info *pdata,
- u8 flags)
-{
- struct i40iw_puda_buf *sqbuf;
- struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
- u8 *buf;
-
- struct tcphdr *tcph;
- struct iphdr *iph;
- struct ipv6hdr *ip6h;
- struct ethhdr *ethh;
- u16 packetsize;
- u16 eth_hlen = ETH_HLEN;
- u32 opts_len = 0;
- u32 pd_len = 0;
- u32 hdr_len = 0;
- u16 vtag;
-
- sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
- if (!sqbuf)
- return NULL;
- buf = sqbuf->mem.va;
-
- if (options)
- opts_len = (u32)options->size;
-
- if (hdr)
- hdr_len = hdr->size;
-
- if (pdata)
- pd_len = pdata->size;
-
- if (cm_node->vlan_id <= VLAN_VID_MASK)
- eth_hlen += 4;
-
- if (cm_node->ipv4)
- packetsize = sizeof(*iph) + sizeof(*tcph);
- else
- packetsize = sizeof(*ip6h) + sizeof(*tcph);
- packetsize += opts_len + hdr_len + pd_len;
-
- memset(buf, 0x00, eth_hlen + packetsize);
-
- sqbuf->totallen = packetsize + eth_hlen;
- sqbuf->maclen = eth_hlen;
- sqbuf->tcphlen = sizeof(*tcph) + opts_len;
- sqbuf->scratch = (void *)cm_node;
-
- ethh = (struct ethhdr *)buf;
- buf += eth_hlen;
-
- if (cm_node->ipv4) {
- sqbuf->ipv4 = true;
-
- iph = (struct iphdr *)buf;
- buf += sizeof(*iph);
- tcph = (struct tcphdr *)buf;
- buf += sizeof(*tcph);
-
- ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
- ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id <= VLAN_VID_MASK) {
- ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
-
- ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
- } else {
- ethh->h_proto = htons(ETH_P_IP);
- }
-
- iph->version = IPVERSION;
-		iph->ihl = 5; /* 5 * 4-byte words, IP header len */
- iph->tos = cm_node->tos;
- iph->tot_len = htons(packetsize);
- iph->id = htons(++cm_node->tcp_cntxt.loc_id);
-
- iph->frag_off = htons(0x4000);
- iph->ttl = 0x40;
- iph->protocol = IPPROTO_TCP;
- iph->saddr = htonl(cm_node->loc_addr[0]);
- iph->daddr = htonl(cm_node->rem_addr[0]);
- } else {
- sqbuf->ipv4 = false;
- ip6h = (struct ipv6hdr *)buf;
- buf += sizeof(*ip6h);
- tcph = (struct tcphdr *)buf;
- buf += sizeof(*tcph);
-
- ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
- ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id <= VLAN_VID_MASK) {
- ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
- ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
- } else {
- ethh->h_proto = htons(ETH_P_IPV6);
- }
- ip6h->version = 6;
- ip6h->priority = cm_node->tos >> 4;
- ip6h->flow_lbl[0] = cm_node->tos << 4;
- ip6h->flow_lbl[1] = 0;
- ip6h->flow_lbl[2] = 0;
- ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
- ip6h->nexthdr = 6;
- ip6h->hop_limit = 128;
- i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
- cm_node->loc_addr);
- i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
- cm_node->rem_addr);
- }
-
- tcph->source = htons(cm_node->loc_port);
- tcph->dest = htons(cm_node->rem_port);
-
- tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
-
- if (flags & SET_ACK) {
- cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
- tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
- tcph->ack = 1;
- } else {
- tcph->ack_seq = 0;
- }
-
- if (flags & SET_SYN) {
- cm_node->tcp_cntxt.loc_seq_num++;
- tcph->syn = 1;
- } else {
- cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
- }
-
- if (flags & SET_FIN) {
- cm_node->tcp_cntxt.loc_seq_num++;
- tcph->fin = 1;
- }
-
- if (flags & SET_RST)
- tcph->rst = 1;
-
- tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
- sqbuf->tcphlen = tcph->doff << 2;
- tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
- tcph->urg_ptr = 0;
-
- if (opts_len) {
- memcpy(buf, options->addr, opts_len);
- buf += opts_len;
- }
-
- if (hdr_len) {
- memcpy(buf, hdr->addr, hdr_len);
- buf += hdr_len;
- }
-
- if (pdata && pdata->addr)
- memcpy(buf, pdata->addr, pdata->size);
-
- atomic_set(&sqbuf->refcount, 1);
-
- return sqbuf;
-}
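-
-/*
- * The doff arithmetic above in numbers: with sizeof(*tcph) = 20 and
- * opts_len = 4, (20 + 4 + 3) >> 2 = 6 header words, so sqbuf->tcphlen
- * becomes 6 << 2 = 24 bytes; the "+ 3" rounds stray option bytes up
- * to the next 32-bit word.
- */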
-
-/**
- * i40iw_send_reset - Send RST packet
- * @cm_node: connection's node
- */
-int i40iw_send_reset(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_puda_buf *sqbuf;
- int flags = SET_RST | SET_ACK;
-
- sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
- if (!sqbuf) {
- i40iw_pr_err("no sqbuf\n");
- return -1;
- }
-
- return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
-}
-
-/**
- * i40iw_active_open_err - send event for active side cm error
- * @cm_node: connection's node
- * @reset: Flag to send reset or not
- */
-static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
-{
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->cm_core->stats_connect_errs++;
- if (reset) {
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "%s cm_node=%p state=%d\n",
- __func__,
- cm_node,
- cm_node->state);
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- }
-
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
-}
-
-/**
- * i40iw_passive_open_err - handle passive side cm error
- * @cm_node: connection's node
- * @reset: send reset or just free cm_node
- */
-static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
-{
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->cm_core->stats_passive_errs++;
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "%s cm_node=%p state =%d\n",
- __func__,
- cm_node,
- cm_node->state);
- if (reset)
- i40iw_send_reset(cm_node);
- else
- i40iw_rem_ref_cm_node(cm_node);
-}
-
-/**
- * i40iw_event_connect_error - to create connect error event
- * @event: cm information for connect event
- */
-static void i40iw_event_connect_error(struct i40iw_cm_event *event)
-{
- struct i40iw_qp *iwqp;
- struct iw_cm_id *cm_id;
-
- cm_id = event->cm_node->cm_id;
- if (!cm_id)
- return;
-
- iwqp = cm_id->provider_data;
-
- if (!iwqp || !iwqp->iwdev)
- return;
-
- iwqp->cm_id = NULL;
- cm_id->provider_data = NULL;
- i40iw_send_cm_event(event->cm_node, cm_id,
- IW_CM_EVENT_CONNECT_REPLY,
- -ECONNRESET);
- cm_id->rem_ref(cm_id);
- i40iw_rem_ref_cm_node(event->cm_node);
-}
-
-/**
- * i40iw_process_options - process options from a TCP header
- * @cm_node: connection's node
- * @optionsloc: pointer to start of options
- * @optionsize: size of all options
- * @syn_packet: flag if syn packet
- */
-static int i40iw_process_options(struct i40iw_cm_node *cm_node,
- u8 *optionsloc,
- u32 optionsize,
- u32 syn_packet)
-{
- u32 tmp;
- u32 offset = 0;
- union all_known_options *all_options;
- char got_mss_option = 0;
-
- while (offset < optionsize) {
- all_options = (union all_known_options *)(optionsloc + offset);
- switch (all_options->as_base.optionnum) {
- case OPTION_NUMBER_END:
- offset = optionsize;
- break;
- case OPTION_NUMBER_NONE:
- offset += 1;
- continue;
- case OPTION_NUMBER_MSS:
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "%s: MSS Length: %d Offset: %d Size: %d\n",
- __func__,
- all_options->as_mss.length,
- offset,
- optionsize);
- got_mss_option = 1;
- if (all_options->as_mss.length != 4)
- return -1;
- tmp = ntohs(all_options->as_mss.mss);
- if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
- cm_node->tcp_cntxt.mss = tmp;
- break;
- case OPTION_NUMBER_WINDOW_SCALE:
- cm_node->tcp_cntxt.snd_wscale =
- all_options->as_windowscale.shiftcount;
- break;
- default:
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "TCP Option not understood: %x\n",
- all_options->as_base.optionnum);
- break;
- }
- offset += all_options->as_base.length;
- }
- if (!got_mss_option && syn_packet)
- cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
- return 0;
-}
-
-/**
- * i40iw_handle_tcp_options - parse and apply received tcp options
- * @cm_node: connection's node
- * @tcph: pointer to tcp header
- * @optionsize: size of options rcvd
- * @passive: active or passive flag
- */
-static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
- struct tcphdr *tcph,
- int optionsize,
- int passive)
-{
- u8 *optionsloc = (u8 *)&tcph[1];
-
- if (optionsize) {
- if (i40iw_process_options(cm_node,
- optionsloc,
- optionsize,
- (u32)tcph->syn)) {
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "%s: Node %p, Sending RESET\n",
- __func__,
- cm_node);
- if (passive)
- i40iw_passive_open_err(cm_node, true);
- else
- i40iw_active_open_err(cm_node, true);
- return -1;
- }
- }
-
- cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
- cm_node->tcp_cntxt.snd_wscale;
-
- if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
- cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
- return 0;
-}
-
-/**
- * i40iw_build_mpa_v1 - build a MPA V1 frame
- * @cm_node: connection's node
- * @start_addr: MPA frame start address
- * @mpa_key: to do read0 or write0
- */
-static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
- void *start_addr,
- u8 mpa_key)
-{
- struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
-
- switch (mpa_key) {
- case MPA_KEY_REQUEST:
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
- break;
- case MPA_KEY_REPLY:
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
- break;
- default:
- break;
- }
- mpa_frame->flags = IETF_MPA_FLAGS_CRC;
- mpa_frame->rev = cm_node->mpa_frame_rev;
- mpa_frame->priv_data_len = htons(cm_node->pdata.size);
-}
-
-/**
- * i40iw_build_mpa_v2 - build a MPA V2 frame
- * @cm_node: connection's node
- * @start_addr: buffer start address
- * @mpa_key: to do read0 or write0
- */
-static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
- void *start_addr,
- u8 mpa_key)
-{
- struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
- struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
- u16 ctrl_ird, ctrl_ord;
-
- /* initialize the upper 5 bytes of the frame */
- i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
- mpa_frame->flags |= IETF_MPA_V2_FLAG;
- mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
-
- /* initialize RTR msg */
- if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
- ctrl_ird = IETF_NO_IRD_ORD;
- ctrl_ord = IETF_NO_IRD_ORD;
- } else {
- ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
- IETF_NO_IRD_ORD : cm_node->ird_size;
- ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
- IETF_NO_IRD_ORD : cm_node->ord_size;
- }
-
- ctrl_ird |= IETF_PEER_TO_PEER;
-
- switch (mpa_key) {
- case MPA_KEY_REQUEST:
- ctrl_ord |= IETF_RDMA0_WRITE;
- ctrl_ord |= IETF_RDMA0_READ;
- break;
- case MPA_KEY_REPLY:
- switch (cm_node->send_rdma0_op) {
- case SEND_RDMA_WRITE_ZERO:
- ctrl_ord |= IETF_RDMA0_WRITE;
- break;
- case SEND_RDMA_READ_ZERO:
- ctrl_ord |= IETF_RDMA0_READ;
- break;
- }
- break;
- default:
- break;
- }
- rtr_msg->ctrl_ird = htons(ctrl_ird);
- rtr_msg->ctrl_ord = htons(ctrl_ord);
-}
-
-/**
- * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
- * @cm_node: connection's node
- * @mpa: mpa data buffer
- * @mpa_key: to do read0 or write0
- */
-static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
- struct i40iw_kmem_info *mpa,
- u8 mpa_key)
-{
- int hdr_len = 0;
-
- switch (cm_node->mpa_frame_rev) {
- case IETF_MPA_V1:
- hdr_len = sizeof(struct ietf_mpa_v1);
- i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
- break;
- case IETF_MPA_V2:
- hdr_len = sizeof(struct ietf_mpa_v2);
- i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
- break;
- default:
- break;
- }
-
- return hdr_len;
-}
-
-/**
- * i40iw_send_mpa_request - active node send mpa request to passive node
- * @cm_node: connection's node
- */
-static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_puda_buf *sqbuf;
-
- if (!cm_node) {
- i40iw_pr_err("cm_node == NULL\n");
- return -1;
- }
-
- cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
- cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
- &cm_node->mpa_hdr,
- MPA_KEY_REQUEST);
- if (!cm_node->mpa_hdr.size) {
- i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
- return -1;
- }
-
- sqbuf = i40iw_form_cm_frame(cm_node,
- NULL,
- &cm_node->mpa_hdr,
- &cm_node->pdata,
- SET_ACK);
- if (!sqbuf) {
- i40iw_pr_err("sq_buf == NULL\n");
- return -1;
- }
- return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
-}
-
-/**
- * i40iw_send_mpa_reject - send an mpa reject frame
- * @cm_node: connection's node
- * @pdata: reject data for connection
- * @plen: length of reject data
- */
-static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
- const void *pdata,
- u8 plen)
-{
- struct i40iw_puda_buf *sqbuf;
- struct i40iw_kmem_info priv_info;
-
- cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
- cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
- &cm_node->mpa_hdr,
- MPA_KEY_REPLY);
-
- cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
- priv_info.addr = (void *)pdata;
- priv_info.size = plen;
-
- sqbuf = i40iw_form_cm_frame(cm_node,
- NULL,
- &cm_node->mpa_hdr,
- &priv_info,
- SET_ACK | SET_FIN);
- if (!sqbuf) {
- i40iw_pr_err("no sqbuf\n");
- return -ENOMEM;
- }
- cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
- return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
-}
-
-/**
- * i40iw_parse_mpa - process an IETF MPA frame
- * @cm_node: connection's node
- * @buffer: Data pointer
- * @type: to return accept or reject
- * @len: Len of mpa buffer
- */
-static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
-{
- struct ietf_mpa_v1 *mpa_frame;
- struct ietf_mpa_v2 *mpa_v2_frame;
- struct ietf_rtr_msg *rtr_msg;
- int mpa_hdr_len;
- int priv_data_len;
-
- *type = I40IW_MPA_REQUEST_ACCEPT;
-
- if (len < sizeof(struct ietf_mpa_v1)) {
- i40iw_pr_err("ietf buffer small (%x)\n", len);
- return -1;
- }
-
- mpa_frame = (struct ietf_mpa_v1 *)buffer;
- mpa_hdr_len = sizeof(struct ietf_mpa_v1);
- priv_data_len = ntohs(mpa_frame->priv_data_len);
-
- if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
- i40iw_pr_err("large pri_data %d\n", priv_data_len);
- return -1;
- }
- if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
- i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
- return -1;
- }
- if (mpa_frame->rev > cm_node->mpa_frame_rev) {
- i40iw_pr_err("rev %d\n", mpa_frame->rev);
- return -1;
- }
- cm_node->mpa_frame_rev = mpa_frame->rev;
-
- if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
- i40iw_pr_err("Unexpected MPA Key received\n");
- return -1;
- }
- } else {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
- i40iw_pr_err("Unexpected MPA Key received\n");
- return -1;
- }
- }
-
- if (priv_data_len + mpa_hdr_len > len) {
- i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
- priv_data_len, mpa_hdr_len, len);
- return -1;
- }
- if (len > MAX_CM_BUFFER) {
- i40iw_pr_err("ietf buffer large len = %d\n", len);
- return -1;
- }
-
- switch (mpa_frame->rev) {
- case IETF_MPA_V2:{
- u16 ird_size;
- u16 ord_size;
- u16 ctrl_ord;
- u16 ctrl_ird;
-
- mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
- mpa_hdr_len += IETF_RTR_MSG_SIZE;
- rtr_msg = &mpa_v2_frame->rtr_msg;
-
- /* parse rtr message */
- ctrl_ord = ntohs(rtr_msg->ctrl_ord);
- ctrl_ird = ntohs(rtr_msg->ctrl_ird);
- ird_size = ctrl_ird & IETF_NO_IRD_ORD;
- ord_size = ctrl_ord & IETF_NO_IRD_ORD;
-
- if (!(ctrl_ird & IETF_PEER_TO_PEER))
- return -1;
-
- if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
- cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
- goto negotiate_done;
- }
-
- if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
- /* responder */
- if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
- cm_node->ird_size = 1;
- if (cm_node->ord_size > ird_size)
- cm_node->ord_size = ird_size;
- } else {
- /* initiator */
- if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
- return -1;
- if (cm_node->ord_size > ird_size)
- cm_node->ord_size = ird_size;
-
- if (cm_node->ird_size < ord_size)
- /* no resources available */
- return -1;
- }
-
-negotiate_done:
- if (ctrl_ord & IETF_RDMA0_READ)
- cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
- else if (ctrl_ord & IETF_RDMA0_WRITE)
- cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
- else /* Not supported RDMA0 operation */
- return -1;
- i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
- "MPAV2: Negotiated ORD: %d, IRD: %d\n",
- cm_node->ord_size, cm_node->ird_size);
- break;
- }
- break;
- case IETF_MPA_V1:
- default:
- break;
- }
-
- memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
- cm_node->pdata.size = priv_data_len;
-
- if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
- *type = I40IW_MPA_REQUEST_REJECT;
-
- if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
- cm_node->snd_mark_en = true;
-
- return 0;
-}
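-
-/*
- * A worked pass through the MPA v2 negotiation above on the responder
- * path (values illustrative): with local ird_size = 16 / ord_size = 8
- * and a peer RTR advertising ird_size = 4 with IETF_RDMA0_READ set,
- * the 8 > 4 check clamps the local ord_size to 4, so the responder
- * never issues more reads than the initiator advertised resources
- * for, and send_rdma0_op becomes SEND_RDMA_READ_ZERO.
- */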
-
-/**
- * i40iw_schedule_cm_timer
- * @cm_node: connection's node
- * @sqbuf: buffer to send
- * @type: if it is send or close
- * @send_retrans: if rexmits to be done
- * @close_when_complete: is cm_node to be removed
- *
- * note - cm_node needs to be protected before calling this. Encase in:
- * i40iw_rem_ref_cm_node(cm_core, cm_node);
- * i40iw_schedule_cm_timer(...)
- * atomic_inc(&cm_node->ref_count);
- */
-int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *sqbuf,
- enum i40iw_timer_type type,
- int send_retrans,
- int close_when_complete)
-{
- struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
- struct i40iw_cm_core *cm_core = cm_node->cm_core;
- struct i40iw_timer_entry *new_send;
- int ret = 0;
- u32 was_timer_set;
- unsigned long flags;
-
- new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
- if (!new_send) {
- if (type != I40IW_TIMER_TYPE_CLOSE)
- i40iw_free_sqbuf(vsi, (void *)sqbuf);
- return -ENOMEM;
- }
- new_send->retrycount = I40IW_DEFAULT_RETRYS;
- new_send->retranscount = I40IW_DEFAULT_RETRANS;
- new_send->sqbuf = sqbuf;
- new_send->timetosend = jiffies;
- new_send->type = type;
- new_send->send_retrans = send_retrans;
- new_send->close_when_complete = close_when_complete;
-
- if (type == I40IW_TIMER_TYPE_CLOSE) {
- new_send->timetosend += (HZ / 10);
- if (cm_node->close_entry) {
- kfree(new_send);
- i40iw_pr_err("already close entry\n");
- return -EINVAL;
- }
- cm_node->close_entry = new_send;
- }
-
- if (type == I40IW_TIMER_TYPE_SEND) {
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- cm_node->send_entry = new_send;
- atomic_inc(&cm_node->ref_count);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
-
- atomic_inc(&sqbuf->refcount);
- i40iw_puda_send_buf(vsi->ilq, sqbuf);
- if (!send_retrans) {
- i40iw_cleanup_retrans_entry(cm_node);
- if (close_when_complete)
- i40iw_rem_ref_cm_node(cm_node);
- return ret;
- }
- }
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- was_timer_set = timer_pending(&cm_core->tcp_timer);
-
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = new_send->timetosend;
- add_timer(&cm_core->tcp_timer);
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- return ret;
-}
-
-/**
- * i40iw_retrans_expired - Could not rexmit the packet
- * @cm_node: connection's node
- */
-static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
-{
- struct iw_cm_id *cm_id = cm_node->cm_id;
- enum i40iw_cm_node_state state = cm_node->state;
-
- cm_node->state = I40IW_CM_STATE_CLOSED;
- switch (state) {
- case I40IW_CM_STATE_SYN_RCVD:
- case I40IW_CM_STATE_CLOSING:
- i40iw_rem_ref_cm_node(cm_node);
- break;
- case I40IW_CM_STATE_FIN_WAIT1:
- case I40IW_CM_STATE_LAST_ACK:
- if (cm_node->cm_id)
- cm_id->rem_ref(cm_id);
- i40iw_send_reset(cm_node);
- break;
- default:
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
- break;
- }
-}
-
-/**
- * i40iw_handle_close_entry - for handling retry/timeouts
- * @cm_node: connection's node
- * @rem_node: flag for remove cm_node
- */
-static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
-{
- struct i40iw_timer_entry *close_entry = cm_node->close_entry;
- struct iw_cm_id *cm_id = cm_node->cm_id;
- struct i40iw_qp *iwqp;
- unsigned long flags;
-
- if (!close_entry)
- return;
- iwqp = (struct i40iw_qp *)close_entry->sqbuf;
- if (iwqp) {
- spin_lock_irqsave(&iwqp->lock, flags);
- if (iwqp->cm_id) {
- iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
- iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
- iwqp->last_aeq = I40IW_AE_RESET_SENT;
- iwqp->ibqp_state = IB_QPS_ERR;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- i40iw_cm_disconn(iwqp);
- } else {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- }
- } else if (rem_node) {
- /* TIME_WAIT state */
- i40iw_rem_ref_cm_node(cm_node);
- }
- if (cm_id)
- cm_id->rem_ref(cm_id);
- kfree(close_entry);
- cm_node->close_entry = NULL;
-}
-
-/**
- * i40iw_build_timer_list - Add cm_nodes to timer list
- * @timer_list: ptr to timer list
- * @hte: ptr to accelerated or non-accelerated list
- */
-static void i40iw_build_timer_list(struct list_head *timer_list,
- struct list_head *hte)
-{
- struct i40iw_cm_node *cm_node;
- struct list_head *list_core_temp, *list_node;
-
- list_for_each_safe(list_node, list_core_temp, hte) {
- cm_node = container_of(list_node, struct i40iw_cm_node, list);
- if (cm_node->close_entry || cm_node->send_entry) {
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->timer_entry, timer_list);
- }
- }
-}
-
-/**
- * i40iw_cm_timer_tick - system's timer expired callback
- * @t: Timer instance to fetch the cm_core pointer from
- */
-static void i40iw_cm_timer_tick(struct timer_list *t)
-{
- unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
- struct i40iw_cm_node *cm_node;
- struct i40iw_timer_entry *send_entry, *close_entry;
- struct list_head *list_core_temp;
- struct i40iw_sc_vsi *vsi;
- struct list_head *list_node;
- struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
- u32 settimer = 0;
- unsigned long timetosend;
- unsigned long flags;
-
- struct list_head timer_list;
-
- INIT_LIST_HEAD(&timer_list);
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- i40iw_build_timer_list(&timer_list, &cm_core->non_accelerated_list);
- i40iw_build_timer_list(&timer_list, &cm_core->accelerated_list);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- list_for_each_safe(list_node, list_core_temp, &timer_list) {
- cm_node = container_of(list_node,
- struct i40iw_cm_node,
- timer_entry);
- close_entry = cm_node->close_entry;
-
- if (close_entry) {
- if (time_after(close_entry->timetosend, jiffies)) {
- if (nexttimeout > close_entry->timetosend ||
- !settimer) {
- nexttimeout = close_entry->timetosend;
- settimer = 1;
- }
- } else {
- i40iw_handle_close_entry(cm_node, 1);
- }
- }
-
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-
- send_entry = cm_node->send_entry;
- if (!send_entry)
- goto done;
- if (time_after(send_entry->timetosend, jiffies)) {
- if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
- if ((nexttimeout > send_entry->timetosend) ||
- !settimer) {
- nexttimeout = send_entry->timetosend;
- settimer = 1;
- }
- } else {
- i40iw_free_retrans_entry(cm_node);
- }
- goto done;
- }
-
- if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
- (cm_node->state == I40IW_CM_STATE_CLOSED)) {
- i40iw_free_retrans_entry(cm_node);
- goto done;
- }
-
- if (!send_entry->retranscount || !send_entry->retrycount) {
- i40iw_free_retrans_entry(cm_node);
-
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- i40iw_retrans_expired(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- goto done;
- }
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-
- vsi = &cm_node->iwdev->vsi;
-
- if (!cm_node->ack_rcvd) {
- atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
- cm_node->cm_core->stats_pkt_retrans++;
- }
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- if (send_entry->send_retrans) {
- send_entry->retranscount--;
- timetosend = (I40IW_RETRY_TIMEOUT <<
- (I40IW_DEFAULT_RETRANS -
- send_entry->retranscount));
-
- send_entry->timetosend = jiffies +
- min(timetosend, I40IW_MAX_TIMEOUT);
- if (nexttimeout > send_entry->timetosend || !settimer) {
- nexttimeout = send_entry->timetosend;
- settimer = 1;
- }
- } else {
- int close_when_complete;
-
- close_when_complete = send_entry->close_when_complete;
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "cm_node=%p state=%d\n",
- cm_node,
- cm_node->state);
- i40iw_free_retrans_entry(cm_node);
- if (close_when_complete)
- i40iw_rem_ref_cm_node(cm_node);
- }
-done:
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- i40iw_rem_ref_cm_node(cm_node);
- }
-
- if (settimer) {
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (!timer_pending(&cm_core->tcp_timer)) {
- cm_core->tcp_timer.expires = nexttimeout;
- add_timer(&cm_core->tcp_timer);
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- }
-}
-
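-/*
- * A user-space sketch of the retransmit backoff computed in
- * i40iw_cm_timer_tick() above: each retransmit doubles the wait,
- * RETRY_TIMEOUT << (DEFAULT_RETRANS - retranscount), clamped to
- * MAX_TIMEOUT.  The constant values below are assumptions chosen for
- * illustration, not the driver's I40IW_* definitions.
- */
-#include <stdio.h>
-
-#define HZ 1000UL /* assumed tick rate */
-#define RETRY_TIMEOUT HZ /* assumed base timeout */
-#define DEFAULT_RETRANS 8 /* assumed retransmit budget */
-#define MAX_TIMEOUT (12 * HZ) /* assumed clamp */
-
-int main(void)
-{
- int retranscount;
-
- /* mirror the decrement-then-shift order used by the timer tick */
- for (retranscount = DEFAULT_RETRANS - 1; retranscount >= 1; retranscount--) {
- unsigned long t = RETRY_TIMEOUT << (DEFAULT_RETRANS - retranscount);
-
- if (t > MAX_TIMEOUT)
- t = MAX_TIMEOUT;
- printf("retransmits left %d -> wait %lu ticks\n", retranscount, t);
- }
- return 0;
-}
-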
-/**
- * i40iw_send_syn - send SYN packet
- * @cm_node: connection's node
- * @sendack: flag to set ACK bit or not
- */
-int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
-{
- struct i40iw_puda_buf *sqbuf;
- int flags = SET_SYN;
- char optionsbuffer[sizeof(struct option_mss) +
- sizeof(struct option_windowscale) +
- sizeof(struct option_base) + TCP_OPTIONS_PADDING];
- struct i40iw_kmem_info opts;
-
- int optionssize = 0;
- /* scratch union used to compose the tcp options */
- union all_known_options *options;
-
- opts.addr = optionsbuffer;
- if (!cm_node) {
- i40iw_pr_err("no cm_node\n");
- return -EINVAL;
- }
-
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_mss.optionnum = OPTION_NUMBER_MSS;
- options->as_mss.length = sizeof(struct option_mss);
- options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
- optionssize += sizeof(struct option_mss);
-
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
- options->as_windowscale.length = sizeof(struct option_windowscale);
- options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
- optionssize += sizeof(struct option_windowscale);
- options = (union all_known_options *)&optionsbuffer[optionssize];
- options->as_end = OPTION_NUMBER_END;
- optionssize += 1;
-
- if (sendack)
- flags |= SET_ACK;
-
- opts.size = optionssize;
-
- sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
- if (!sqbuf) {
- i40iw_pr_err("no sqbuf\n");
- return -1;
- }
- return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
-}
-
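-/*
- * A user-space sketch of the SYN option layout that i40iw_send_syn()
- * builds above: an MSS option, a window-scale option, then a single
- * end-of-option-list byte.  The struct names and the sample MSS/shift
- * values are assumptions local to this sketch, not the driver's
- * option_mss/option_windowscale definitions.
- */
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <arpa/inet.h>
-
-struct opt_mss { /* kind 2, length 4, 16-bit MSS */
- uint8_t kind, len;
- uint16_t mss;
-} __attribute__((packed));
-
-struct opt_wscale { /* kind 3, length 3, shift count */
- uint8_t kind, len, shift;
-} __attribute__((packed));
-
-int main(void)
-{
- uint8_t buf[8];
- size_t off = 0;
- struct opt_mss mss = { 2, sizeof(struct opt_mss), htons(1460) };
- struct opt_wscale ws = { 3, sizeof(struct opt_wscale), 2 };
-
- memcpy(buf + off, &mss, sizeof(mss));
- off += sizeof(mss);
- memcpy(buf + off, &ws, sizeof(ws));
- off += sizeof(ws);
- buf[off++] = 0; /* end-of-option-list byte */
-
- printf("options occupy %zu bytes\n", off);
- return 0;
-}
-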
-/**
- * i40iw_send_ack - Send ACK packet
- * @cm_node: connection's node
- */
-static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_puda_buf *sqbuf;
- struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
-
- sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
- if (sqbuf)
- i40iw_puda_send_buf(vsi->ilq, sqbuf);
- else
- i40iw_pr_err("no sqbuf\n");
-}
-
-/**
- * i40iw_send_fin - Send FIN pkt
- * @cm_node: connection's node
- */
-static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_puda_buf *sqbuf;
-
- sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
- if (!sqbuf) {
- i40iw_pr_err("no sqbuf\n");
- return -1;
- }
- return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
-}
-
-/**
- * i40iw_find_node - find a cm node that matches the reference cm node
- * @cm_core: cm's core
- * @rem_port: remote tcp port num
- * @rem_addr: remote ip addr
- * @loc_port: local tcp port num
- * @loc_addr: local ip addr
- * @add_refcnt: flag to increment refcount of cm_node
- * @accelerated_list: flag for accelerated vs non-accelerated list to search
- */
-struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
- u16 rem_port,
- u32 *rem_addr,
- u16 loc_port,
- u32 *loc_addr,
- bool add_refcnt,
- bool accelerated_list)
-{
- struct list_head *hte;
- struct i40iw_cm_node *cm_node;
- unsigned long flags;
-
- hte = accelerated_list ?
- &cm_core->accelerated_list : &cm_core->non_accelerated_list;
-
- /* walk the list and find the cm_node matching this addr/port quad */
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_entry(cm_node, hte, list) {
- if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
- (cm_node->loc_port == loc_port) &&
- !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
- (cm_node->rem_port == rem_port)) {
- if (add_refcnt)
- atomic_inc(&cm_node->ref_count);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- return cm_node;
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- /* no owner node */
- return NULL;
-}
-
-/**
- * i40iw_find_listener - find a cm node listening on this addr-port pair
- * @cm_core: cm's core
- * @dst_port: listener tcp port num
- * @dst_addr: listener ip addr
- * @vlan_id: vlan id for the given address
- * @listener_state: state to match with listen node's
- */
-static struct i40iw_cm_listener *i40iw_find_listener(
- struct i40iw_cm_core *cm_core,
- u32 *dst_addr,
- u16 dst_port,
- u16 vlan_id,
- enum i40iw_cm_listener_state listener_state)
-{
- struct i40iw_cm_listener *listen_node;
- static const u32 ip_zero[4] = { 0, 0, 0, 0 };
- u32 listen_addr[4];
- u16 listen_port;
- unsigned long flags;
-
- /* walk the listener list and find a match on the addr/port pair */
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
- listen_port = listen_node->loc_port;
- /* compare node pair, return node handle if a match */
- if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
- !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
- (listen_port == dst_port) &&
- (listener_state & listen_node->listener_state)) {
- atomic_inc(&listen_node->ref_count);
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return listen_node;
- }
- }
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return NULL;
-}
-
-/**
- * i40iw_add_hte_node - add a cm node to the hash table
- * @cm_core: cm's core
- * @cm_node: connection's node
- */
-static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
- struct i40iw_cm_node *cm_node)
-{
- unsigned long flags;
-
- if (!cm_node || !cm_core) {
- i40iw_pr_err("cm_node or cm_core == NULL\n");
- return;
- }
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_add_tail(&cm_node->list, &cm_core->non_accelerated_list);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-}
-
-/**
- * i40iw_find_port - find port that matches reference port
- * @hte: ptr to accelerated or non-accelerated list
- * @port: port number to locate
- */
-static bool i40iw_find_port(struct list_head *hte, u16 port)
-{
- struct i40iw_cm_node *cm_node;
-
- list_for_each_entry(cm_node, hte, list) {
- if (cm_node->loc_port == port)
- return true;
- }
- return false;
-}
-
-/**
- * i40iw_port_in_use - determine if port is in use
- * @cm_core: cm's core
- * @port: port number
- */
-bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
-{
- struct i40iw_cm_listener *listen_node;
- unsigned long flags;
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (i40iw_find_port(&cm_core->accelerated_list, port) ||
- i40iw_find_port(&cm_core->non_accelerated_list, port)) {
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- return true;
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (listen_node->loc_port == port) {
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return true;
- }
- }
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
-
- return false;
-}
-
-/**
- * i40iw_del_multiple_qhash - Remove qhash and child listens
- * @iwdev: iWarp device
- * @cm_info: CM info for parent listen node
- * @cm_parent_listen_node: The parent listen node
- */
-static enum i40iw_status_code i40iw_del_multiple_qhash(
- struct i40iw_device *iwdev,
- struct i40iw_cm_info *cm_info,
- struct i40iw_cm_listener *cm_parent_listen_node)
-{
- struct i40iw_cm_listener *child_listen_node;
- enum i40iw_status_code ret = I40IW_ERR_CONFIG;
- struct list_head *pos, *tpos;
- unsigned long flags;
-
- spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
- list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
- child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
- if (child_listen_node->ipv4)
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
- child_listen_node->loc_addr,
- child_listen_node->loc_port,
- child_listen_node->vlan_id);
- else
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
- "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
- child_listen_node->loc_addr,
- child_listen_node->loc_port,
- child_listen_node->vlan_id);
- list_del(pos);
- memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
- sizeof(cm_info->loc_addr));
- cm_info->vlan_id = child_listen_node->vlan_id;
- if (child_listen_node->qhash_set) {
- ret = i40iw_manage_qhash(iwdev, cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL, false);
- child_listen_node->qhash_set = false;
- } else {
- ret = I40IW_SUCCESS;
- }
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "freed pointer = %p\n",
- child_listen_node);
- kfree(child_listen_node);
- cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
- }
- spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
-
- return ret;
-}
-
-/**
- * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan
- * @addr: local IPv6 address
- * @vlan_id: vlan id for the given IPv6 address
- *
- * Returns the net_device of the IPv6 address and also sets the
- * vlan id for that address.
- */
-static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)
-{
- struct net_device *ip_dev = NULL;
- struct in6_addr laddr6;
-
- if (!IS_ENABLED(CONFIG_IPV6))
- return NULL;
- i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
- if (vlan_id)
- *vlan_id = I40IW_NO_VLAN;
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
- if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- break;
- }
- }
- rcu_read_unlock();
- return ip_dev;
-}
-
-/**
- * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
- * @addr: local IPv4 address
- */
-static u16 i40iw_get_vlan_ipv4(u32 *addr)
-{
- struct net_device *netdev;
- u16 vlan_id = I40IW_NO_VLAN;
-
- netdev = ip_dev_find(&init_net, htonl(addr[0]));
- if (netdev) {
- vlan_id = rdma_vlan_dev_vlan_id(netdev);
- dev_put(netdev);
- }
- return vlan_id;
-}
-
-/**
- * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
- * @iwdev: iWarp device
- * @cm_info: CM info for parent listen node
- * @cm_parent_listen_node: The parent listen node
- *
- * Adds a qhash and a child listen node for every IPv6 address
- * on the adapter and adds the associated qhash filter
- */
-static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
- struct i40iw_cm_info *cm_info,
- struct i40iw_cm_listener *cm_parent_listen_node)
-{
- struct net_device *ip_dev;
- struct inet6_dev *idev;
- struct inet6_ifaddr *ifp, *tmp;
- enum i40iw_status_code ret = 0;
- struct i40iw_cm_listener *child_listen_node;
- unsigned long flags;
-
- rtnl_lock();
- for_each_netdev(&init_net, ip_dev) {
- if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
- (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
- (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
- idev = __in6_dev_get(ip_dev);
- if (!idev) {
- i40iw_pr_err("idev == NULL\n");
- break;
- }
- list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "IP=%pI6, vlan_id=%d, MAC=%pM\n",
- &ifp->addr,
- rdma_vlan_dev_vlan_id(ip_dev),
- ip_dev->dev_addr);
- child_listen_node =
- kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "Allocating child listener %p\n",
- child_listen_node);
- if (!child_listen_node) {
- ret = I40IW_ERR_NO_MEMORY;
- goto exit;
- }
- cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- cm_parent_listen_node->vlan_id = cm_info->vlan_id;
-
- memcpy(child_listen_node, cm_parent_listen_node,
- sizeof(*child_listen_node));
-
- i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
- ifp->addr.in6_u.u6_addr32);
- memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
- sizeof(cm_info->loc_addr));
-
- ret = i40iw_manage_qhash(iwdev, cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- NULL, true);
- if (!ret) {
- child_listen_node->qhash_set = true;
- spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
- list_add(&child_listen_node->child_listen_list,
- &cm_parent_listen_node->child_listen_list);
- spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
- cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
- } else {
- kfree(child_listen_node);
- }
- }
- }
- }
-exit:
- rtnl_unlock();
- return ret;
-}
-
-/**
- * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
- * @iwdev: iWarp device
- * @cm_info: CM info for parent listen node
- * @cm_parent_listen_node: The parent listen node
- *
- * Adds a qhash and a child listen node for every IPv4 address
- * on the adapter and adds the associated qhash filter
- */
-static enum i40iw_status_code i40iw_add_mqh_4(
- struct i40iw_device *iwdev,
- struct i40iw_cm_info *cm_info,
- struct i40iw_cm_listener *cm_parent_listen_node)
-{
- struct net_device *dev;
- struct in_device *idev;
- struct i40iw_cm_listener *child_listen_node;
- enum i40iw_status_code ret = 0;
- unsigned long flags;
-
- rtnl_lock();
- for_each_netdev(&init_net, dev) {
- if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
- (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
- (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
- const struct in_ifaddr *ifa;
-
- idev = in_dev_get(dev);
-
- in_dev_for_each_ifa_rtnl(ifa, idev) {
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
- &ifa->ifa_address,
- rdma_vlan_dev_vlan_id(dev),
- dev->dev_addr);
- child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "Allocating child listener %p\n",
- child_listen_node);
- if (!child_listen_node) {
- in_dev_put(idev);
- ret = I40IW_ERR_NO_MEMORY;
- goto exit;
- }
- cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
- cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
- cm_parent_listen_node->vlan_id = cm_info->vlan_id;
- memcpy(child_listen_node,
- cm_parent_listen_node,
- sizeof(*child_listen_node));
-
- child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
- memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
- sizeof(cm_info->loc_addr));
-
- ret = i40iw_manage_qhash(iwdev,
- cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- NULL,
- true);
- if (!ret) {
- child_listen_node->qhash_set = true;
- spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
- list_add(&child_listen_node->child_listen_list,
- &cm_parent_listen_node->child_listen_list);
- spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
- } else {
- kfree(child_listen_node);
- cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
- }
- }
-
- in_dev_put(idev);
- }
- }
-exit:
- rtnl_unlock();
- return ret;
-}
-
-/**
- * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
- * @cm_core: cm's core
- * @listener: passive connection's listener
- * @free_hanging_nodes: to free associated cm_nodes
- * @apbvt_del: flag to delete the apbvt
- */
-static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
- struct i40iw_cm_listener *listener,
- int free_hanging_nodes, bool apbvt_del)
-{
- int ret = -EINVAL;
- int err = 0;
- struct list_head *list_pos;
- struct list_head *list_temp;
- struct i40iw_cm_node *cm_node;
- struct list_head reset_list;
- struct i40iw_cm_info nfo;
- struct i40iw_cm_node *loopback;
- enum i40iw_cm_node_state old_state;
- unsigned long flags;
-
- /* free non-accelerated child nodes for this listener */
- INIT_LIST_HEAD(&reset_list);
- if (free_hanging_nodes) {
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_safe(list_pos,
- list_temp, &cm_core->non_accelerated_list) {
- cm_node = container_of(list_pos, struct i40iw_cm_node, list);
- if ((cm_node->listener == listener) &&
- !cm_node->accelerated) {
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->reset_entry, &reset_list);
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- }
-
- list_for_each_safe(list_pos, list_temp, &reset_list) {
- cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
- loopback = cm_node->loopbackpartner;
- if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
- i40iw_rem_ref_cm_node(cm_node);
- } else {
- if (!loopback) {
- i40iw_cleanup_retrans_entry(cm_node);
- err = i40iw_send_reset(cm_node);
- if (err) {
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_pr_err("send reset failed\n");
- } else {
- old_state = cm_node->state;
- cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
- if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
- i40iw_rem_ref_cm_node(cm_node);
- }
- } else {
- struct i40iw_cm_event event;
-
- event.cm_node = loopback;
- memcpy(event.cm_info.rem_addr,
- loopback->rem_addr, sizeof(event.cm_info.rem_addr));
- memcpy(event.cm_info.loc_addr,
- loopback->loc_addr, sizeof(event.cm_info.loc_addr));
- event.cm_info.rem_port = loopback->rem_port;
- event.cm_info.loc_port = loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- event.cm_info.ipv4 = loopback->ipv4;
- atomic_inc(&loopback->ref_count);
- loopback->state = I40IW_CM_STATE_CLOSED;
- i40iw_event_connect_error(&event);
- cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
- i40iw_rem_ref_cm_node(cm_node);
- }
- }
- }
-
- if (!atomic_dec_return(&listener->ref_count)) {
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_del(&listener->list);
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
-
- if (listener->iwdev) {
- if (apbvt_del)
- i40iw_manage_apbvt(listener->iwdev,
- listener->loc_port,
- I40IW_MANAGE_APBVT_DEL);
-
- memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
- nfo.loc_port = listener->loc_port;
- nfo.ipv4 = listener->ipv4;
- nfo.vlan_id = listener->vlan_id;
- nfo.user_pri = listener->user_pri;
-
- if (!list_empty(&listener->child_listen_list)) {
- i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
- } else {
- if (listener->qhash_set)
- i40iw_manage_qhash(listener->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- }
- }
-
- cm_core->stats_listen_destroyed++;
- kfree(listener);
- cm_core->stats_listen_nodes_destroyed++;
- listener = NULL;
- ret = 0;
- }
-
- if (listener) {
- if (atomic_read(&listener->pend_accepts_cnt) > 0)
- i40iw_debug(cm_core->dev,
- I40IW_DEBUG_CM,
- "%s: listener (%p) pending accepts=%u\n",
- __func__,
- listener,
- atomic_read(&listener->pend_accepts_cnt));
- }
-
- return ret;
-}
-
-/**
- * i40iw_cm_del_listen - delete a listener
- * @cm_core: cm's core
- * @listener: passive connection's listener
- * @apbvt_del: flag to delete apbvt
- */
-static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
- struct i40iw_cm_listener *listener,
- bool apbvt_del)
-{
- listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
- listener->cm_id = NULL; /* going to be destroyed pretty soon */
- return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
-}
-
-/**
- * i40iw_addr_resolve_neigh - resolve neighbor address
- * @iwdev: iwarp device structure
- * @src_ip: local ip address
- * @dst_ip: remote ip address
- * @arpindex: if there is an arp entry
- */
-static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
- u32 src_ip,
- u32 dst_ip,
- int arpindex)
-{
- struct rtable *rt;
- struct neighbour *neigh;
- int rc = arpindex;
- __be32 dst_ipaddr = htonl(dst_ip);
- __be32 src_ipaddr = htonl(src_ip);
-
- rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
- if (IS_ERR(rt)) {
- i40iw_pr_err("ip_route_output\n");
- return rc;
- }
-
- neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
-
- rcu_read_lock();
- if (neigh) {
- if (neigh->nud_state & NUD_VALID) {
- if (arpindex >= 0) {
- if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
- neigh->ha))
- /* Mac address same as arp table */
- goto resolve_neigh_exit;
- i40iw_manage_arp_cache(iwdev,
- iwdev->arp_table[arpindex].mac_addr,
- &dst_ip,
- true,
- I40IW_ARP_DELETE);
- }
-
- i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
- rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
- } else {
- neigh_event_send(neigh, NULL);
- }
- }
- resolve_neigh_exit:
-
- rcu_read_unlock();
- if (neigh)
- neigh_release(neigh);
-
- ip_rt_put(rt);
- return rc;
-}
-
-/**
- * i40iw_get_dst_ipv6 - look up the dst entry for an ipv6 flow
- * @src_addr: local ipv6 address
- * @dst_addr: remote ipv6 address
- */
-static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
- struct sockaddr_in6 *dst_addr)
-{
- struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.daddr = dst_addr->sin6_addr;
- fl6.saddr = src_addr->sin6_addr;
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = dst_addr->sin6_scope_id;
-
- dst = ip6_route_output(&init_net, NULL, &fl6);
- return dst;
-}
-
-/**
- * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
- * @iwdev: iwarp device structure
- * @src: source ip address
- * @dest: remote ip address
- * @arpindex: if there is an arp entry
- */
-static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
- u32 *src,
- u32 *dest,
- int arpindex)
-{
- struct neighbour *neigh;
- int rc = arpindex;
- struct dst_entry *dst;
- struct sockaddr_in6 dst_addr;
- struct sockaddr_in6 src_addr;
-
- memset(&dst_addr, 0, sizeof(dst_addr));
- dst_addr.sin6_family = AF_INET6;
- i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
- memset(&src_addr, 0, sizeof(src_addr));
- src_addr.sin6_family = AF_INET6;
- i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
- dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
- if (!dst || dst->error) {
- if (dst) {
- i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
- dst->error);
- dst_release(dst);
- }
- return rc;
- }
-
- neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
-
- rcu_read_lock();
- if (neigh) {
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
- if (neigh->nud_state & NUD_VALID) {
- if (arpindex >= 0) {
- if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
- neigh->ha)) {
- /* Mac address same as in arp table */
- goto resolve_neigh_exit6;
- }
- i40iw_manage_arp_cache(iwdev,
- iwdev->arp_table[arpindex].mac_addr,
- dest,
- false,
- I40IW_ARP_DELETE);
- }
- i40iw_manage_arp_cache(iwdev,
- neigh->ha,
- dest,
- false,
- I40IW_ARP_ADD);
- rc = i40iw_arp_table(iwdev,
- dest,
- false,
- NULL,
- I40IW_ARP_RESOLVE);
- } else {
- neigh_event_send(neigh, NULL);
- }
- }
-
- resolve_neigh_exit6:
- rcu_read_unlock();
- if (neigh)
- neigh_release(neigh);
- dst_release(dst);
- return rc;
-}
-
-/**
- * i40iw_ipv4_is_loopback - check if loopback
- * @loc_addr: local addr to compare
- * @rem_addr: remote address
- */
-static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
-{
- return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
-}
-
-/**
- * i40iw_ipv6_is_loopback - check if loopback
- * @loc_addr: local addr to compare
- * @rem_addr: remote address
- */
-static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
-{
- struct in6_addr raddr6;
-
- i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
- return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
-}
-
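-/*
- * A quick user-space check of the two loopback predicates above: an IPv4
- * connection is treated as loopback when the remote address falls in
- * 127.0.0.0/8 or equals the local address.  The helper below restates
- * that test outside the kernel; the sample addresses are arbitrary.
- */
-#include <stdio.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <arpa/inet.h>
-
-static bool is_loopback4(uint32_t loc, uint32_t rem)
-{
- /* host-order addresses, as the driver stores them */
- return (rem & 0xff000000u) == 0x7f000000u || loc == rem;
-}
-
-int main(void)
-{
- uint32_t loc = ntohl(inet_addr("192.168.1.5"));
- uint32_t rem = ntohl(inet_addr("127.0.0.1"));
-
- printf("loopback: %d\n", is_loopback4(loc, rem)); /* prints 1 */
- return 0;
-}
-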
-/**
- * i40iw_make_cm_node - create a new instance of a cm node
- * @cm_core: cm's core
- * @iwdev: iwarp device structure
- * @cm_info: quad info for connection
- * @listener: passive connection's listener
- */
-static struct i40iw_cm_node *i40iw_make_cm_node(
- struct i40iw_cm_core *cm_core,
- struct i40iw_device *iwdev,
- struct i40iw_cm_info *cm_info,
- struct i40iw_cm_listener *listener)
-{
- struct i40iw_cm_node *cm_node;
- int oldarpindex;
- int arpindex;
- struct net_device *netdev = iwdev->netdev;
-
- /* create an hte and cm_node for this instance */
- cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
- if (!cm_node)
- return NULL;
-
- /* set our node specific transport info */
- cm_node->ipv4 = cm_info->ipv4;
- cm_node->vlan_id = cm_info->vlan_id;
- if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
- cm_node->vlan_id = 0;
- cm_node->tos = cm_info->tos;
- cm_node->user_pri = cm_info->user_pri;
- if (listener) {
- if (listener->tos != cm_info->tos)
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
- "application TOS[%d] and remote client TOS[%d] mismatch\n",
- listener->tos, cm_info->tos);
- cm_node->tos = max(listener->tos, cm_info->tos);
- cm_node->user_pri = rt_tos2priority(cm_node->tos);
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
- cm_node->tos, cm_node->user_pri);
- }
- memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
- memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
- cm_node->loc_port = cm_info->loc_port;
- cm_node->rem_port = cm_info->rem_port;
-
- cm_node->mpa_frame_rev = iwdev->mpa_version;
- cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
- cm_node->ird_size = I40IW_MAX_IRD_SIZE;
- cm_node->ord_size = I40IW_MAX_ORD_SIZE;
-
- cm_node->listener = listener;
- cm_node->cm_id = cm_info->cm_id;
- ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
- spin_lock_init(&cm_node->retrans_list_lock);
- cm_node->ack_rcvd = false;
-
- atomic_set(&cm_node->ref_count, 1);
- /* associate our parent CM core */
- cm_node->cm_core = cm_core;
- cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
- cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
- cm_node->tcp_cntxt.rcv_wnd =
- I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
- if (cm_node->ipv4) {
- cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
- htonl(cm_node->rem_addr[0]),
- htons(cm_node->loc_port),
- htons(cm_node->rem_port));
- cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4;
- } else if (IS_ENABLED(CONFIG_IPV6)) {
- __be32 loc[4] = {
- htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
- htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
- };
- __be32 rem[4] = {
- htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
- htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
- };
- cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
- htons(cm_node->loc_port),
- htons(cm_node->rem_port));
- cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6;
- }
-
- cm_node->iwdev = iwdev;
- cm_node->dev = &iwdev->sc_dev;
-
- if ((cm_node->ipv4 &&
- i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
- (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
- cm_node->rem_addr))) {
- arpindex = i40iw_arp_table(iwdev,
- cm_node->rem_addr,
- false,
- NULL,
- I40IW_ARP_RESOLVE);
- } else {
- oldarpindex = i40iw_arp_table(iwdev,
- cm_node->rem_addr,
- false,
- NULL,
- I40IW_ARP_RESOLVE);
- if (cm_node->ipv4)
- arpindex = i40iw_addr_resolve_neigh(iwdev,
- cm_info->loc_addr[0],
- cm_info->rem_addr[0],
- oldarpindex);
- else if (IS_ENABLED(CONFIG_IPV6))
- arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
- cm_info->loc_addr,
- cm_info->rem_addr,
- oldarpindex);
- else
- arpindex = -EINVAL;
- }
- if (arpindex < 0) {
- i40iw_pr_err("arp resolution failed\n");
- kfree(cm_node);
- return NULL;
- }
- ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
- i40iw_add_hte_node(cm_core, cm_node);
- cm_core->stats_nodes_created++;
- return cm_node;
-}
-
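-/*
- * The MSS programmed by i40iw_make_cm_node() above is simply the VSI MTU
- * minus a fixed header overhead per address family.  The overhead values
- * below (IP header + TCP header) are illustrative assumptions, not the
- * driver's I40IW_MTU_TO_MSS_IPV4/IPV6 constants.
- */
-#include <stdio.h>
-
-#define IPV4_HDR 20 /* assumed */
-#define IPV6_HDR 40 /* assumed */
-#define TCP_HDR 20 /* assumed */
-
-int main(void)
-{
- unsigned int mtu = 1500;
-
- printf("ipv4 mss = %u\n", mtu - (IPV4_HDR + TCP_HDR)); /* 1460 */
- printf("ipv6 mss = %u\n", mtu - (IPV6_HDR + TCP_HDR)); /* 1440 */
- return 0;
-}
-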
-/**
- * i40iw_rem_ref_cm_node - destroy an instance of a cm node
- * @cm_node: connection's node
- */
-static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
-{
- struct i40iw_cm_core *cm_core = cm_node->cm_core;
- struct i40iw_qp *iwqp;
- struct i40iw_cm_info nfo;
- unsigned long flags;
-
- spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
- if (atomic_dec_return(&cm_node->ref_count)) {
- spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
- return;
- }
- list_del(&cm_node->list);
- spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
-
- /* if the node is destroyed before connection was accelerated */
- if (!cm_node->accelerated && cm_node->accept_pend) {
- pr_err("node destroyed before established\n");
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- }
- if (cm_node->close_entry)
- i40iw_handle_close_entry(cm_node, 0);
- if (cm_node->listener) {
- i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
- } else {
- if (cm_node->apbvt_set) {
- i40iw_manage_apbvt(cm_node->iwdev,
- cm_node->loc_port,
- I40IW_MANAGE_APBVT_DEL);
- cm_node->apbvt_set = 0;
- }
- i40iw_get_addr_info(cm_node, &nfo);
- if (cm_node->qhash_set) {
- i40iw_manage_qhash(cm_node->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- cm_node->qhash_set = 0;
- }
- }
-
- iwqp = cm_node->iwqp;
- if (iwqp) {
- iwqp->cm_node = NULL;
- i40iw_qp_rem_ref(&iwqp->ibqp);
- cm_node->iwqp = NULL;
- } else if (cm_node->qhash_set) {
- i40iw_get_addr_info(cm_node, &nfo);
- i40iw_manage_qhash(cm_node->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- cm_node->qhash_set = 0;
- }
-
- cm_node->cm_core->stats_nodes_destroyed++;
- kfree(cm_node);
-}
-
-/**
- * i40iw_handle_fin_pkt - FIN packet received
- * @cm_node: connection's node
- */
-static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
-{
- int ret;
-
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_RCVD:
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_MPAREJ_RCVD:
- cm_node->tcp_cntxt.rcv_nxt++;
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_LAST_ACK;
- i40iw_send_fin(cm_node);
- break;
- case I40IW_CM_STATE_MPAREQ_SENT:
- i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
- cm_node->tcp_cntxt.rcv_nxt++;
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_FIN_WAIT1:
- cm_node->tcp_cntxt.rcv_nxt++;
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSING;
- i40iw_send_ack(cm_node);
- /*
- * Wait for ACK as this is simultaneous close.
- * After we receive ACK, do not send anything.
- * Just rm the node.
- */
- break;
- case I40IW_CM_STATE_FIN_WAIT2:
- cm_node->tcp_cntxt.rcv_nxt++;
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_TIME_WAIT;
- i40iw_send_ack(cm_node);
- ret = i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
- if (ret)
- i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
- break;
- case I40IW_CM_STATE_TIME_WAIT:
- cm_node->tcp_cntxt.rcv_nxt++;
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_rem_ref_cm_node(cm_node);
- break;
- case I40IW_CM_STATE_OFFLOADED:
- default:
- i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
- break;
- }
-}
-
-/**
- * i40iw_handle_rst_pkt - process received RST packet
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- i40iw_cleanup_retrans_entry(cm_node);
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_MPAREQ_SENT:
- switch (cm_node->mpa_frame_rev) {
- case IETF_MPA_V2:
- cm_node->mpa_frame_rev = IETF_MPA_V1;
- /* send a syn and goto syn sent state */
- cm_node->state = I40IW_CM_STATE_SYN_SENT;
- if (i40iw_send_syn(cm_node, 0))
- i40iw_active_open_err(cm_node, false);
- break;
- case IETF_MPA_V1:
- default:
- i40iw_active_open_err(cm_node, false);
- break;
- }
- break;
- case I40IW_CM_STATE_MPAREQ_RCVD:
- atomic_inc(&cm_node->passive_state);
- break;
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_SYN_RCVD:
- case I40IW_CM_STATE_LISTENING:
- i40iw_pr_err("Bad state, state = %d\n", cm_node->state);
- i40iw_passive_open_err(cm_node, false);
- break;
- case I40IW_CM_STATE_OFFLOADED:
- i40iw_active_open_err(cm_node, false);
- break;
- case I40IW_CM_STATE_CLOSED:
- break;
- case I40IW_CM_STATE_FIN_WAIT2:
- case I40IW_CM_STATE_FIN_WAIT1:
- case I40IW_CM_STATE_LAST_ACK:
- cm_node->cm_id->rem_ref(cm_node->cm_id);
- fallthrough;
- case I40IW_CM_STATE_TIME_WAIT:
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_rem_ref_cm_node(cm_node);
- break;
- default:
- break;
- }
-}
-
-/**
- * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- int ret;
- int datasize = rbuf->datalen;
- u8 *dataloc = rbuf->data;
- enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
- u32 res_type;
-
- ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
- if (ret) {
- if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
- i40iw_active_open_err(cm_node, true);
- else
- i40iw_passive_open_err(cm_node, true);
- return;
- }
-
- switch (cm_node->state) {
- case I40IW_CM_STATE_ESTABLISHED:
- if (res_type == I40IW_MPA_REQUEST_REJECT)
- i40iw_pr_err("unexpected MPA reject in established state\n");
- cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
- type = I40IW_CM_EVENT_MPA_REQ;
- i40iw_send_ack(cm_node); /* ACK received MPA request */
- atomic_set(&cm_node->passive_state,
- I40IW_PASSIVE_STATE_INDICATED);
- break;
- case I40IW_CM_STATE_MPAREQ_SENT:
- i40iw_cleanup_retrans_entry(cm_node);
- if (res_type == I40IW_MPA_REQUEST_REJECT) {
- type = I40IW_CM_EVENT_MPA_REJECT;
- cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
- } else {
- type = I40IW_CM_EVENT_CONNECTED;
- cm_node->state = I40IW_CM_STATE_OFFLOADED;
- }
- i40iw_send_ack(cm_node);
- break;
- default:
- pr_err("%s wrong cm_node state = %d\n", __func__, cm_node->state);
- break;
- }
- i40iw_create_event(cm_node, type);
-}
-
-/**
- * i40iw_indicate_pkt_err - Send up err event to cm
- * @cm_node: connection's node
- */
-static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
-{
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_MPAREQ_SENT:
- i40iw_active_open_err(cm_node, true);
- break;
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_SYN_RCVD:
- i40iw_passive_open_err(cm_node, true);
- break;
- case I40IW_CM_STATE_OFFLOADED:
- default:
- break;
- }
-}
-
-/**
- * i40iw_check_syn - Check for error on received syn ack
- * @cm_node: connection's node
- * @tcph: pointer tcp header
- */
-static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
-{
- int err = 0;
-
- if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
- err = 1;
- i40iw_active_open_err(cm_node, true);
- }
- return err;
-}
-
-/**
- * i40iw_check_seq - check seq numbers if OK
- * @cm_node: connection's node
- * @tcph: pointer tcp header
- */
-static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
-{
- int err = 0;
- u32 seq;
- u32 ack_seq;
- u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
- u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
- u32 rcv_wnd;
-
- seq = ntohl(tcph->seq);
- ack_seq = ntohl(tcph->ack_seq);
- rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
- if (ack_seq != loc_seq_num)
- err = -1;
- else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
- err = -1;
- if (err) {
- i40iw_pr_err("seq number\n");
- i40iw_indicate_pkt_err(cm_node);
- }
- return err;
-}
-
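-/*
- * A user-space sketch of the acceptance test in i40iw_check_seq() above:
- * the peer's ACK must equal our next send sequence, and the peer's
- * sequence must fall inside [rcv_nxt, rcv_nxt + rcv_wnd].  between() is
- * modeled on the kernel helper of the same name and stays correct across
- * 32-bit sequence wrap; the sample numbers are arbitrary.
- */
-#include <stdio.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-static bool between(uint32_t seq, uint32_t low, uint32_t high)
-{
- /* unsigned subtraction makes the comparison wrap-safe */
- return high - low >= seq - low;
-}
-
-int main(void)
-{
- uint32_t rcv_nxt = 0xfffffff0u; /* about to wrap */
- uint32_t rcv_wnd = 0x10000u;
-
- printf("%d\n", between(0x00000010u, rcv_nxt, rcv_nxt + rcv_wnd)); /* 1 */
- printf("%d\n", between(0x00020000u, rcv_nxt, rcv_nxt + rcv_wnd)); /* 0 */
- return 0;
-}
-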
-/**
- * i40iw_handle_syn_pkt - process a SYN packet (passive side)
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
- int ret;
- u32 inc_sequence;
- int optionsize;
- struct i40iw_cm_info nfo;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
- inc_sequence = ntohl(tcph->seq);
-
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_MPAREQ_SENT:
- /* Rcvd syn on active open connection */
- i40iw_active_open_err(cm_node, 1);
- break;
- case I40IW_CM_STATE_LISTENING:
- /* Passive OPEN */
- if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
- cm_node->listener->backlog) {
- cm_node->cm_core->stats_backlog_drops++;
- i40iw_passive_open_err(cm_node, false);
- break;
- }
- ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
- if (ret) {
- i40iw_passive_open_err(cm_node, false);
- /* drop pkt */
- break;
- }
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
- cm_node->accept_pend = 1;
- atomic_inc(&cm_node->listener->pend_accepts_cnt);
-
- cm_node->state = I40IW_CM_STATE_SYN_RCVD;
- i40iw_get_addr_info(cm_node, &nfo);
- ret = i40iw_manage_qhash(cm_node->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- (void *)cm_node,
- false);
- cm_node->qhash_set = true;
- break;
- case I40IW_CM_STATE_CLOSED:
- i40iw_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_OFFLOADED:
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_FIN_WAIT1:
- case I40IW_CM_STATE_FIN_WAIT2:
- case I40IW_CM_STATE_MPAREQ_RCVD:
- case I40IW_CM_STATE_LAST_ACK:
- case I40IW_CM_STATE_CLOSING:
- case I40IW_CM_STATE_UNKNOWN:
- default:
- break;
- }
-}
-
-/**
- * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
- int ret;
- u32 inc_sequence;
- int optionsize;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
- inc_sequence = ntohl(tcph->seq);
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_SENT:
- i40iw_cleanup_retrans_entry(cm_node);
- /* active open */
- if (i40iw_check_syn(cm_node, tcph)) {
- i40iw_pr_err("check syn fail\n");
- return;
- }
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- /* setup options */
- ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
- if (ret) {
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "cm_node=%p tcp_options failed\n",
- cm_node);
- break;
- }
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
- i40iw_send_ack(cm_node); /* ACK for the syn_ack */
- ret = i40iw_send_mpa_request(cm_node);
- if (ret) {
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "cm_node=%p i40iw_send_mpa_request failed\n",
- cm_node);
- break;
- }
- cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
- break;
- case I40IW_CM_STATE_MPAREQ_RCVD:
- i40iw_passive_open_err(cm_node, true);
- break;
- case I40IW_CM_STATE_LISTENING:
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_CLOSED:
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- i40iw_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_FIN_WAIT1:
- case I40IW_CM_STATE_FIN_WAIT2:
- case I40IW_CM_STATE_LAST_ACK:
- case I40IW_CM_STATE_OFFLOADED:
- case I40IW_CM_STATE_CLOSING:
- case I40IW_CM_STATE_UNKNOWN:
- case I40IW_CM_STATE_MPAREQ_SENT:
- default:
- break;
- }
-}
-
-/**
- * i40iw_handle_ack_pkt - process packet with ACK
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
- u32 inc_sequence;
- int ret = 0;
- int optionsize;
- u32 datasize = rbuf->datalen;
-
- optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
-
- if (i40iw_check_seq(cm_node, tcph))
- return -EINVAL;
-
- inc_sequence = ntohl(tcph->seq);
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_RCVD:
- i40iw_cleanup_retrans_entry(cm_node);
- ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
- if (ret)
- break;
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- cm_node->state = I40IW_CM_STATE_ESTABLISHED;
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- i40iw_handle_rcv_mpa(cm_node, rbuf);
- }
- break;
- case I40IW_CM_STATE_ESTABLISHED:
- i40iw_cleanup_retrans_entry(cm_node);
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- i40iw_handle_rcv_mpa(cm_node, rbuf);
- }
- break;
- case I40IW_CM_STATE_MPAREQ_SENT:
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- if (datasize) {
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- cm_node->ack_rcvd = false;
- i40iw_handle_rcv_mpa(cm_node, rbuf);
- } else {
- cm_node->ack_rcvd = true;
- }
- break;
- case I40IW_CM_STATE_LISTENING:
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_CLOSED:
- i40iw_cleanup_retrans_entry(cm_node);
- atomic_inc(&cm_node->ref_count);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_LAST_ACK:
- case I40IW_CM_STATE_CLOSING:
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_CLOSED;
- if (!cm_node->accept_pend)
- cm_node->cm_id->rem_ref(cm_node->cm_id);
- i40iw_rem_ref_cm_node(cm_node);
- break;
- case I40IW_CM_STATE_FIN_WAIT1:
- i40iw_cleanup_retrans_entry(cm_node);
- cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
- break;
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_FIN_WAIT2:
- case I40IW_CM_STATE_OFFLOADED:
- case I40IW_CM_STATE_MPAREQ_RCVD:
- case I40IW_CM_STATE_UNKNOWN:
- default:
- i40iw_cleanup_retrans_entry(cm_node);
- break;
- }
- return ret;
-}
-
-/**
- * i40iw_process_packet - process cm packet
- * @cm_node: connection's node
- * @rbuf: receive buffer
- */
-static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *rbuf)
-{
- enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
- struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
- u32 fin_set = 0;
- int ret;
-
- if (tcph->rst) {
- pkt_type = I40IW_PKT_TYPE_RST;
- } else if (tcph->syn) {
- pkt_type = I40IW_PKT_TYPE_SYN;
- if (tcph->ack)
- pkt_type = I40IW_PKT_TYPE_SYNACK;
- } else if (tcph->ack) {
- pkt_type = I40IW_PKT_TYPE_ACK;
- }
- if (tcph->fin)
- fin_set = 1;
-
- switch (pkt_type) {
- case I40IW_PKT_TYPE_SYN:
- i40iw_handle_syn_pkt(cm_node, rbuf);
- break;
- case I40IW_PKT_TYPE_SYNACK:
- i40iw_handle_synack_pkt(cm_node, rbuf);
- break;
- case I40IW_PKT_TYPE_ACK:
- ret = i40iw_handle_ack_pkt(cm_node, rbuf);
- if (fin_set && !ret)
- i40iw_handle_fin_pkt(cm_node);
- break;
- case I40IW_PKT_TYPE_RST:
- i40iw_handle_rst_pkt(cm_node, rbuf);
- break;
- default:
- if (fin_set &&
- (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
- i40iw_handle_fin_pkt(cm_node);
- break;
- }
-}
-
-/**
- * i40iw_make_listen_node - create a listen node with params
- * @cm_core: cm's core
- * @iwdev: iwarp device structure
- * @cm_info: quad info for connection
- */
-static struct i40iw_cm_listener *i40iw_make_listen_node(
- struct i40iw_cm_core *cm_core,
- struct i40iw_device *iwdev,
- struct i40iw_cm_info *cm_info)
-{
- struct i40iw_cm_listener *listener;
- unsigned long flags;
-
- /* cannot have multiple matching listeners */
- listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
- cm_info->loc_port,
- cm_info->vlan_id,
- I40IW_CM_LISTENER_EITHER_STATE);
- if (listener &&
- (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
- atomic_dec(&listener->ref_count);
- i40iw_debug(cm_core->dev,
- I40IW_DEBUG_CM,
- "Not creating listener since it already exists\n");
- return NULL;
- }
-
- if (!listener) {
- /* create a CM listen node (a half node used to match incoming traffic) */
- listener = kzalloc(sizeof(*listener), GFP_KERNEL);
- if (!listener)
- return NULL;
- cm_core->stats_listen_nodes_created++;
- memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
- listener->loc_port = cm_info->loc_port;
-
- INIT_LIST_HEAD(&listener->child_listen_list);
-
- atomic_set(&listener->ref_count, 1);
- } else {
- listener->reused_node = 1;
- }
-
- listener->cm_id = cm_info->cm_id;
- listener->ipv4 = cm_info->ipv4;
- listener->vlan_id = cm_info->vlan_id;
- atomic_set(&listener->pend_accepts_cnt, 0);
- listener->cm_core = cm_core;
- listener->iwdev = iwdev;
-
- listener->backlog = cm_info->backlog;
- listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
-
- if (!listener->reused_node) {
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_add(&listener->list, &cm_core->listen_nodes);
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- }
-
- return listener;
-}
-
-/**
- * i40iw_create_cm_node - make a connection node with params
- * @cm_core: cm's core
- * @iwdev: iwarp device structure
- * @conn_param: upper layer connection parameters
- * @cm_info: quad info for connection
- */
-static struct i40iw_cm_node *i40iw_create_cm_node(
- struct i40iw_cm_core *cm_core,
- struct i40iw_device *iwdev,
- struct iw_cm_conn_param *conn_param,
- struct i40iw_cm_info *cm_info)
-{
- struct i40iw_cm_node *cm_node;
- struct i40iw_cm_listener *loopback_remotelistener;
- struct i40iw_cm_node *loopback_remotenode;
- struct i40iw_cm_info loopback_cm_info;
-
- u16 private_data_len = conn_param->private_data_len;
- const void *private_data = conn_param->private_data;
-
- /* create a CM connection node */
- cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
- if (!cm_node)
- return ERR_PTR(-ENOMEM);
- /* set our node side to client (active) side */
- cm_node->tcp_cntxt.client = 1;
- cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
-
- i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
-
- if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
- loopback_remotelistener = i40iw_find_listener(
- cm_core,
- cm_info->rem_addr,
- cm_node->rem_port,
- cm_node->vlan_id,
- I40IW_CM_LISTENER_ACTIVE_STATE);
- if (!loopback_remotelistener) {
- i40iw_rem_ref_cm_node(cm_node);
- return ERR_PTR(-ECONNREFUSED);
- } else {
- loopback_cm_info = *cm_info;
- loopback_cm_info.loc_port = cm_info->rem_port;
- loopback_cm_info.rem_port = cm_info->loc_port;
- loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
- loopback_cm_info.ipv4 = cm_info->ipv4;
- loopback_remotenode = i40iw_make_cm_node(cm_core,
- iwdev,
- &loopback_cm_info,
- loopback_remotelistener);
- if (!loopback_remotenode) {
- i40iw_rem_ref_cm_node(cm_node);
- return ERR_PTR(-ENOMEM);
- }
- cm_core->stats_loopbacks++;
- loopback_remotenode->loopbackpartner = cm_node;
- loopback_remotenode->tcp_cntxt.rcv_wscale =
- I40IW_CM_DEFAULT_RCV_WND_SCALE;
- cm_node->loopbackpartner = loopback_remotenode;
- memcpy(loopback_remotenode->pdata_buf, private_data,
- private_data_len);
- loopback_remotenode->pdata.size = private_data_len;
-
- if (loopback_remotenode->ord_size > cm_node->ird_size)
- loopback_remotenode->ord_size =
- cm_node->ird_size;
-
- cm_node->state = I40IW_CM_STATE_OFFLOADED;
- cm_node->tcp_cntxt.rcv_nxt =
- loopback_remotenode->tcp_cntxt.loc_seq_num;
- loopback_remotenode->tcp_cntxt.rcv_nxt =
- cm_node->tcp_cntxt.loc_seq_num;
- cm_node->tcp_cntxt.max_snd_wnd =
- loopback_remotenode->tcp_cntxt.rcv_wnd;
- loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
- loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
- loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
- }
- return cm_node;
- }
-
- cm_node->pdata.size = private_data_len;
- cm_node->pdata.addr = cm_node->pdata_buf;
-
- memcpy(cm_node->pdata_buf, private_data, private_data_len);
-
- cm_node->state = I40IW_CM_STATE_SYN_SENT;
- return cm_node;
-}
-
-/**
- * i40iw_cm_reject - reject and tear down a connection
- * @cm_node: connection's node
- * @pdata: ptr to private data for reject
- * @plen: size of private data
- */
-static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
-{
- int ret = 0;
- int err;
- int passive_state;
- struct iw_cm_id *cm_id = cm_node->cm_id;
- struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
-
- if (cm_node->tcp_cntxt.client)
- return ret;
- i40iw_cleanup_retrans_entry(cm_node);
-
- if (!loopback) {
- passive_state = atomic_inc_return(&cm_node->passive_state);
- if (passive_state == I40IW_SEND_RESET_EVENT) {
- cm_node->state = I40IW_CM_STATE_CLOSED;
- i40iw_rem_ref_cm_node(cm_node);
- } else {
- if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
- i40iw_rem_ref_cm_node(cm_node);
- } else {
- ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
- if (ret) {
- cm_node->state = I40IW_CM_STATE_CLOSED;
- err = i40iw_send_reset(cm_node);
- if (err)
- i40iw_pr_err("send reset failed\n");
- } else {
- cm_id->add_ref(cm_id);
- }
- }
- }
- } else {
- cm_node->cm_id = NULL;
- if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
- i40iw_rem_ref_cm_node(cm_node);
- i40iw_rem_ref_cm_node(loopback);
- } else {
- ret = i40iw_send_cm_event(loopback,
- loopback->cm_id,
- IW_CM_EVENT_CONNECT_REPLY,
- -ECONNREFUSED);
- i40iw_rem_ref_cm_node(cm_node);
- loopback->state = I40IW_CM_STATE_CLOSING;
-
- cm_id = loopback->cm_id;
- i40iw_rem_ref_cm_node(loopback);
- cm_id->rem_ref(cm_id);
- }
- }
-
- return ret;
-}
-
-/**
- * i40iw_cm_close - close a cm connection
- * @cm_node: connection's node
- */
-static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
-{
- int ret = 0;
-
- if (!cm_node)
- return -EINVAL;
-
- switch (cm_node->state) {
- case I40IW_CM_STATE_SYN_RCVD:
- case I40IW_CM_STATE_SYN_SENT:
- case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
- case I40IW_CM_STATE_ESTABLISHED:
- case I40IW_CM_STATE_ACCEPTING:
- case I40IW_CM_STATE_MPAREQ_SENT:
- case I40IW_CM_STATE_MPAREQ_RCVD:
- i40iw_cleanup_retrans_entry(cm_node);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_CLOSE_WAIT:
- cm_node->state = I40IW_CM_STATE_LAST_ACK;
- i40iw_send_fin(cm_node);
- break;
- case I40IW_CM_STATE_FIN_WAIT1:
- case I40IW_CM_STATE_FIN_WAIT2:
- case I40IW_CM_STATE_LAST_ACK:
- case I40IW_CM_STATE_TIME_WAIT:
- case I40IW_CM_STATE_CLOSING:
- ret = -1;
- break;
- case I40IW_CM_STATE_LISTENING:
- i40iw_cleanup_retrans_entry(cm_node);
- i40iw_send_reset(cm_node);
- break;
- case I40IW_CM_STATE_MPAREJ_RCVD:
- case I40IW_CM_STATE_UNKNOWN:
- case I40IW_CM_STATE_INITED:
- case I40IW_CM_STATE_CLOSED:
- case I40IW_CM_STATE_LISTENER_DESTROYED:
- i40iw_rem_ref_cm_node(cm_node);
- break;
- case I40IW_CM_STATE_OFFLOADED:
- if (cm_node->send_entry)
- i40iw_pr_err("send_entry\n");
- i40iw_rem_ref_cm_node(cm_node);
- break;
- }
- return ret;
-}
-
-/**
- * i40iw_receive_ilq - receive an ethernet packet and process it
- * through the CM
- * @vsi: pointer to the vsi structure
- * @rbuf: receive buffer
- */
-void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
-{
- struct i40iw_cm_node *cm_node;
- struct i40iw_cm_listener *listener;
- struct iphdr *iph;
- struct ipv6hdr *ip6h;
- struct tcphdr *tcph;
- struct i40iw_cm_info cm_info;
- struct i40iw_sc_dev *dev = vsi->dev;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- struct vlan_ethhdr *ethh;
- u16 vtag;
-
- /* mac header is 18 bytes with a vlan tag, else 14 */
- iph = (struct iphdr *)rbuf->iph;
- memset(&cm_info, 0, sizeof(cm_info));
-
- i40iw_debug_buf(dev,
- I40IW_DEBUG_ILQ,
- "RECEIVE ILQ BUFFER",
- rbuf->mem.va,
- rbuf->totallen);
- ethh = (struct vlan_ethhdr *)rbuf->mem.va;
-
- if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
- vtag = ntohs(ethh->h_vlan_TCI);
- cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- cm_info.vlan_id = vtag & VLAN_VID_MASK;
- i40iw_debug(cm_core->dev,
- I40IW_DEBUG_CM,
- "%s vlan_id=%d\n",
- __func__,
- cm_info.vlan_id);
- } else {
- cm_info.vlan_id = I40IW_NO_VLAN;
- }
- tcph = (struct tcphdr *)rbuf->tcph;
-
- if (rbuf->ipv4) {
- cm_info.loc_addr[0] = ntohl(iph->daddr);
- cm_info.rem_addr[0] = ntohl(iph->saddr);
- cm_info.ipv4 = true;
- cm_info.tos = iph->tos;
- } else {
- ip6h = (struct ipv6hdr *)rbuf->iph;
- i40iw_copy_ip_ntohl(cm_info.loc_addr,
- ip6h->daddr.in6_u.u6_addr32);
- i40iw_copy_ip_ntohl(cm_info.rem_addr,
- ip6h->saddr.in6_u.u6_addr32);
- cm_info.ipv4 = false;
- cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
- }
- cm_info.loc_port = ntohs(tcph->dest);
- cm_info.rem_port = ntohs(tcph->source);
- cm_node = i40iw_find_node(cm_core,
- cm_info.rem_port,
- cm_info.rem_addr,
- cm_info.loc_port,
- cm_info.loc_addr,
- true,
- false);
-
- if (!cm_node) {
- /* the only packet type accepted here is a SYN for the passive open */
- if (!tcph->syn || tcph->ack)
- return;
- listener =
- i40iw_find_listener(cm_core,
- cm_info.loc_addr,
- cm_info.loc_port,
- cm_info.vlan_id,
- I40IW_CM_LISTENER_ACTIVE_STATE);
- if (!listener) {
- cm_info.cm_id = NULL;
- i40iw_debug(cm_core->dev,
- I40IW_DEBUG_CM,
- "%s no listener found\n",
- __func__);
- return;
- }
- cm_info.cm_id = listener->cm_id;
- cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
- if (!cm_node) {
- i40iw_debug(cm_core->dev,
- I40IW_DEBUG_CM,
- "%s allocate node failed\n",
- __func__);
- atomic_dec(&listener->ref_count);
- return;
- }
- if (!tcph->rst && !tcph->fin) {
- cm_node->state = I40IW_CM_STATE_LISTENING;
- } else {
- i40iw_rem_ref_cm_node(cm_node);
- return;
- }
- atomic_inc(&cm_node->ref_count);
- } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
- i40iw_rem_ref_cm_node(cm_node);
- return;
- }
- i40iw_process_packet(cm_node, rbuf);
- i40iw_rem_ref_cm_node(cm_node);
-}
-
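A minimal user-space sketch of the VLAN decomposition above: the 16-bit TCI
splits into a 3-bit priority and a 12-bit VLAN ID, with the standard
mask/shift constants (VLAN_PRIO_MASK 0xE000, VLAN_PRIO_SHIFT 13,
VLAN_VID_MASK 0x0FFF) written out as literals.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* priority 5, VLAN ID 100, as it would appear on the wire */
        uint16_t tci_be = htons((5u << 13) | 100);
        uint16_t vtag = ntohs(tci_be);
        uint8_t user_pri = (vtag & 0xE000) >> 13; /* VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT */
        uint16_t vlan_id = vtag & 0x0FFF;         /* VLAN_VID_MASK */

        printf("user_pri=%u vlan_id=%u\n", user_pri, vlan_id);
        return 0;
    }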
-/**
- * i40iw_setup_cm_core - allocate a top level instance of a cm
- * core
- * @iwdev: iwarp device structure
- */
-int i40iw_setup_cm_core(struct i40iw_device *iwdev)
-{
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
-
- cm_core->iwdev = iwdev;
- cm_core->dev = &iwdev->sc_dev;
-
- INIT_LIST_HEAD(&cm_core->accelerated_list);
- INIT_LIST_HEAD(&cm_core->non_accelerated_list);
- INIT_LIST_HEAD(&cm_core->listen_nodes);
-
- timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);
-
- spin_lock_init(&cm_core->ht_lock);
- spin_lock_init(&cm_core->listen_list_lock);
- spin_lock_init(&cm_core->apbvt_lock);
-
- cm_core->event_wq = alloc_ordered_workqueue("iwewq",
- WQ_MEM_RECLAIM);
- if (!cm_core->event_wq)
- goto error;
-
- cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
- WQ_MEM_RECLAIM);
- if (!cm_core->disconn_wq)
- goto error;
-
- return 0;
-error:
- i40iw_cleanup_cm_core(&iwdev->cm_core);
-
- return -ENOMEM;
-}
-
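i40iw_setup_cm_core funnels every allocation failure through a single error
label and relies on i40iw_cleanup_cm_core tolerating a partially initialized
core (each teardown is guarded by a NULL check). A user-space sketch of that
pattern, with hypothetical names:

    #include <stdlib.h>

    struct core { void *wq_a; void *wq_b; };

    static void core_cleanup(struct core *c)
    {
        /* safe on partial init: each teardown is guarded */
        if (c->wq_a)
            free(c->wq_a);
        if (c->wq_b)
            free(c->wq_b);
    }

    static int core_setup(struct core *c)
    {
        c->wq_a = calloc(1, 64);
        if (!c->wq_a)
            goto error;
        c->wq_b = calloc(1, 64);
        if (!c->wq_b)
            goto error;
        return 0;
    error:
        core_cleanup(c);  /* unwinds whatever was set up */
        return -1;
    }

    int main(void)
    {
        struct core c = { 0 };  /* zeroed so cleanup is always safe */

        if (core_setup(&c))
            return 1;
        core_cleanup(&c);
        return 0;
    }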
-/**
- * i40iw_cleanup_cm_core - deallocate a top level instance of a
- * cm core
- * @cm_core: cm's core
- */
-void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
-{
- unsigned long flags;
-
- if (!cm_core)
- return;
-
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (timer_pending(&cm_core->tcp_timer))
- del_timer_sync(&cm_core->tcp_timer);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- if (cm_core->event_wq)
- destroy_workqueue(cm_core->event_wq);
- if (cm_core->disconn_wq)
- destroy_workqueue(cm_core->disconn_wq);
-}
-
-/**
- * i40iw_init_tcp_ctx - setup qp context
- * @cm_node: connection's node
- * @tcp_info: offload info for tcp
- * @iwqp: associate qp for the connection
- */
-static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
- struct i40iw_tcp_offload_info *tcp_info,
- struct i40iw_qp *iwqp)
-{
- tcp_info->ipv4 = cm_node->ipv4;
- tcp_info->drop_ooo_seg = true;
- tcp_info->wscale = true;
- tcp_info->ignore_tcp_opt = true;
- tcp_info->ignore_tcp_uns_opt = true;
- tcp_info->no_nagle = false;
-
- tcp_info->ttl = I40IW_DEFAULT_TTL;
- tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
- tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
- tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
-
- tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
- tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
- tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
-
- tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
- tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
- tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
-
- tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
- tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
- tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
- tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
- tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
- cm_node->tcp_cntxt.rcv_wscale);
-
- tcp_info->flow_label = 0;
- tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
- if (cm_node->vlan_id <= VLAN_VID_MASK) {
- tcp_info->insert_vlan_tag = true;
- tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
- cm_node->vlan_id);
- }
- if (cm_node->ipv4) {
- tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
- tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
-
- tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
- tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
- tcp_info->arp_idx =
- cpu_to_le16((u16)i40iw_arp_table(
- iwqp->iwdev,
- &tcp_info->dest_ip_addr3,
- true,
- NULL,
- I40IW_ARP_RESOLVE));
- } else {
- tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
- tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
- tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
- tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
- tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
- tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
- tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
- tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
- tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
- tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
- tcp_info->arp_idx =
- cpu_to_le16((u16)i40iw_arp_table(
- iwqp->iwdev,
- &tcp_info->dest_ip_addr0,
- false,
- NULL,
- I40IW_ARP_RESOLVE));
- }
-}
-
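Every multi-byte field in the offload context above is stored little-endian
for the device via cpu_to_le32()/cpu_to_le16(), regardless of host byte
order. A user-space analogue using glibc's htole32() from <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t snd_nxt = 0x12345678;  /* host byte order */
        uint32_t le = htole32(snd_nxt); /* device-visible, little-endian */

        printf("host=%08x le-bytes=%02x %02x %02x %02x\n", snd_nxt,
               ((uint8_t *)&le)[0], ((uint8_t *)&le)[1],
               ((uint8_t *)&le)[2], ((uint8_t *)&le)[3]);
        return 0;
    }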
-/**
- * i40iw_cm_init_tsa_conn - setup qp for RTS
- * @iwqp: associate qp for the connection
- * @cm_node: connection's node
- */
-static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
- struct i40iw_cm_node *cm_node)
-{
- struct i40iw_tcp_offload_info tcp_info;
- struct i40iwarp_offload_info *iwarp_info;
- struct i40iw_qp_host_ctx_info *ctx_info;
- struct i40iw_device *iwdev = iwqp->iwdev;
- struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
-
- memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
- iwarp_info = &iwqp->iwarp_info;
- ctx_info = &iwqp->ctx_info;
-
- ctx_info->tcp_info = &tcp_info;
- ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
- ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
-
- iwarp_info->ord_size = cm_node->ord_size;
- iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
-
- if (iwarp_info->ord_size == 1)
- iwarp_info->ord_size = 2;
-
- iwarp_info->rd_enable = true;
- iwarp_info->rdmap_ver = 1;
- iwarp_info->ddp_ver = 1;
-
- iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
-
- ctx_info->tcp_info_valid = true;
- ctx_info->iwarp_info_valid = true;
- ctx_info->add_to_qoslist = true;
- ctx_info->user_pri = cm_node->user_pri;
-
- i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
- if (cm_node->snd_mark_en) {
- iwarp_info->snd_mark_en = true;
- iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
- SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
- }
-
- cm_node->state = I40IW_CM_STATE_OFFLOADED;
- tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
- tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
- tcp_info.tos = cm_node->tos;
-
- dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
-
- /* once tcp_info is set, no need to do it again */
- ctx_info->tcp_info_valid = false;
- ctx_info->iwarp_info_valid = false;
- ctx_info->add_to_qoslist = false;
-}
-
-/**
- * i40iw_cm_disconn - when a connection is being closed
- * @iwqp: associate qp for the connection
- */
-void i40iw_cm_disconn(struct i40iw_qp *iwqp)
-{
- struct disconn_work *work;
- struct i40iw_device *iwdev = iwqp->iwdev;
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- unsigned long flags;
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return; /* Timer will clean up */
-
- spin_lock_irqsave(&iwdev->qptable_lock, flags);
- if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
- "%s qp_id %d is already freed\n",
- __func__, iwqp->ibqp.qp_num);
- kfree(work);
- return;
- }
- i40iw_qp_add_ref(&iwqp->ibqp);
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
-
- work->iwqp = iwqp;
- INIT_WORK(&work->work, i40iw_disconnect_worker);
- queue_work(cm_core->disconn_wq, &work->work);
-}
-
-/**
- * i40iw_qp_disconnect - free qp and close cm
- * @iwqp: associate qp for the connection
- */
-static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
-{
- struct i40iw_device *iwdev;
- struct i40iw_ib_device *iwibdev;
-
- iwdev = to_iwdev(iwqp->ibqp.device);
- if (!iwdev) {
- i40iw_pr_err("iwdev == NULL\n");
- return;
- }
-
- iwibdev = iwdev->iwibdev;
-
- if (iwqp->active_conn) {
- /* indicate this connection is NOT active */
- iwqp->active_conn = 0;
- } else {
- /* Need to free the Last Streaming Mode Message */
- if (iwqp->ietf_mem.va) {
- if (iwqp->lsmm_mr)
- iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr,
- NULL);
- i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
- }
- }
-
- /* close the CM node down if it is still active */
- if (iwqp->cm_node) {
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
- i40iw_cm_close(iwqp->cm_node);
- }
-}
-
-/**
- * i40iw_cm_disconn_true - called by worker thread to disconnect qp
- * @iwqp: associate qp for the connection
- */
-static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
-{
- struct iw_cm_id *cm_id;
- struct i40iw_device *iwdev;
- struct i40iw_sc_qp *qp = &iwqp->sc_qp;
- u16 last_ae;
- u8 original_hw_tcp_state;
- u8 original_ibqp_state;
- int disconn_status = 0;
- int issue_disconn = 0;
- int issue_close = 0;
- int issue_flush = 0;
- struct ib_event ibevent;
- unsigned long flags;
- int ret;
-
- if (!iwqp) {
- i40iw_pr_err("iwqp == NULL\n");
- return;
- }
-
- spin_lock_irqsave(&iwqp->lock, flags);
- cm_id = iwqp->cm_id;
-	/* make sure we haven't already closed this connection */
- if (!cm_id) {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return;
- }
-
- iwdev = to_iwdev(iwqp->ibqp.device);
-
- original_hw_tcp_state = iwqp->hw_tcp_state;
- original_ibqp_state = iwqp->ibqp_state;
- last_ae = iwqp->last_aeq;
-
- if (qp->term_flags) {
- issue_disconn = 1;
- issue_close = 1;
- iwqp->cm_id = NULL;
-		/* When the term timer expires after cm_timer, we don't want
-		 * the terminate-handler to issue cm_disconn, which can re-free
-		 * a QP even after its refcnt = 0.
-		 */
- i40iw_terminate_del_timer(qp);
- if (!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
- issue_flush = 1;
- }
- } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
- ((original_ibqp_state == IB_QPS_RTS) &&
- (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
- issue_disconn = 1;
- if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
- disconn_status = -ECONNRESET;
- }
-
- if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
- (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
- (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
- iwdev->reset)) {
- issue_close = 1;
- iwqp->cm_id = NULL;
- if (!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
- issue_flush = 1;
- }
- }
-
- spin_unlock_irqrestore(&iwqp->lock, flags);
- if (issue_flush && !iwqp->destroyed) {
- /* Flush the queues */
- i40iw_flush_wqes(iwdev, iwqp);
-
- if (qp->term_flags && iwqp->ibqp.event_handler) {
- ibevent.device = iwqp->ibqp.device;
- ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
- IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
- ibevent.element.qp = &iwqp->ibqp;
- iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
- }
- }
-
- if (cm_id && cm_id->event_handler) {
- if (issue_disconn) {
- ret = i40iw_send_cm_event(NULL,
- cm_id,
- IW_CM_EVENT_DISCONNECT,
- disconn_status);
-
- if (ret)
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "disconnect event failed %s: - cm_id = %p\n",
- __func__, cm_id);
- }
- if (issue_close) {
- i40iw_qp_disconnect(iwqp);
- cm_id->provider_data = iwqp;
- ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
- if (ret)
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "close event failed %s: - cm_id = %p\n",
- __func__, cm_id);
- cm_id->rem_ref(cm_id);
- }
- }
-}
-
-/**
- * i40iw_disconnect_worker - worker for connection close
- * @work: pointer to the disconn work structure
- */
-static void i40iw_disconnect_worker(struct work_struct *work)
-{
- struct disconn_work *dwork = container_of(work, struct disconn_work, work);
- struct i40iw_qp *iwqp = dwork->iwqp;
-
- kfree(dwork);
- i40iw_cm_disconn_true(iwqp);
- i40iw_qp_rem_ref(&iwqp->ibqp);
-}
-
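The worker recovers its enclosing disconn_work from the embedded work_struct
with container_of(), then frees the container before touching the payload it
copied out, so the work memory is never used after kfree(). A stand-alone C
equivalent of the pointer arithmetic, with simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };
    struct disconn_work { void *qp; struct work work; };

    int main(void)
    {
        struct disconn_work dw = { .qp = (void *)0x1 };
        struct work *w = &dw.work; /* what the queue hands back */
        struct disconn_work *back = container_of(w, struct disconn_work, work);

        printf("recovered qp=%p (match=%d)\n", back->qp, back == &dw);
        return 0;
    }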
-/**
- * i40iw_accept - registered call for connection to be accepted
- * @cm_id: cm information for passive connection
- * @conn_param: accept parameters
- */
-int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct ib_qp *ibqp;
- struct i40iw_qp *iwqp;
- struct i40iw_device *iwdev;
- struct i40iw_sc_dev *dev;
- struct i40iw_cm_core *cm_core;
- struct i40iw_cm_node *cm_node;
- struct ib_qp_attr attr;
- int passive_state;
- struct ib_mr *ibmr;
- struct i40iw_pd *iwpd;
- u16 buf_len = 0;
- struct i40iw_kmem_info accept;
- enum i40iw_status_code status;
- u64 tagged_offset;
- unsigned long flags;
-
- memset(&attr, 0, sizeof(attr));
- ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
- if (!ibqp)
- return -EINVAL;
-
- iwqp = to_iwqp(ibqp);
- iwdev = iwqp->iwdev;
- dev = &iwdev->sc_dev;
- cm_core = &iwdev->cm_core;
- cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
-
- if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
- cm_node->ipv4 = true;
- cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
- } else {
- cm_node->ipv4 = false;
- i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);
- }
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Accept vlan_id=%d\n",
- cm_node->vlan_id);
- if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
- if (cm_node->loopbackpartner)
- i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
- i40iw_rem_ref_cm_node(cm_node);
- return -EINVAL;
- }
-
- passive_state = atomic_inc_return(&cm_node->passive_state);
- if (passive_state == I40IW_SEND_RESET_EVENT) {
- i40iw_rem_ref_cm_node(cm_node);
- return -ECONNRESET;
- }
-
- cm_node->cm_core->stats_accepts++;
- iwqp->cm_node = (void *)cm_node;
- cm_node->iwqp = iwqp;
-
- buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
-
- status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
-
- if (status)
- return -ENOMEM;
- cm_node->pdata.size = conn_param->private_data_len;
- accept.addr = iwqp->ietf_mem.va;
- accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
- memcpy(accept.addr + accept.size, conn_param->private_data,
- conn_param->private_data_len);
-
- /* setup our first outgoing iWarp send WQE (the IETF frame response) */
- if ((cm_node->ipv4 &&
- !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
- (!cm_node->ipv4 &&
- !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
- iwpd = iwqp->iwpd;
- tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
- ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
- iwqp->ietf_mem.pa,
- buf_len,
- IB_ACCESS_LOCAL_WRITE,
- &tagged_offset);
- if (IS_ERR(ibmr)) {
- i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
- return -ENOMEM;
- }
-
- ibmr->pd = &iwpd->ibpd;
- ibmr->device = iwpd->ibpd.device;
- iwqp->lsmm_mr = ibmr;
- if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
- iwqp->ietf_mem.va,
- (accept.size + conn_param->private_data_len),
- ibmr->lkey);
-
- } else {
- if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
- }
-
- if (iwqp->page)
- kunmap(iwqp->page);
-
- iwqp->cm_id = cm_id;
- cm_node->cm_id = cm_id;
-
- cm_id->provider_data = (void *)iwqp;
- iwqp->active_conn = 0;
-
- cm_node->lsmm_size = accept.size + conn_param->private_data_len;
- i40iw_cm_init_tsa_conn(iwqp, cm_node);
- cm_id->add_ref(cm_id);
- i40iw_qp_add_ref(&iwqp->ibqp);
-
- attr.qp_state = IB_QPS_RTS;
- cm_node->qhash_set = false;
- i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
-
- cm_node->accelerated = true;
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_move_tail(&cm_node->list, &cm_core->accelerated_list);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- status =
- i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
- if (status)
- i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
-
- if (cm_node->loopbackpartner) {
- cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
-
- /* copy entire MPA frame to our cm_node's frame */
- memcpy(cm_node->loopbackpartner->pdata_buf,
- conn_param->private_data,
- conn_param->private_data_len);
- i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
- }
-
- if (cm_node->accept_pend) {
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- cm_node->accept_pend = 0;
- }
- return 0;
-}
-
-/**
- * i40iw_reject - registered call for connection to be rejected
- * @cm_id: cm information for passive connection
- * @pdata: private data to be sent
- * @pdata_len: private data length
- */
-int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct i40iw_device *iwdev;
- struct i40iw_cm_node *cm_node;
- struct i40iw_cm_node *loopback;
-
- cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
- loopback = cm_node->loopbackpartner;
- cm_node->cm_id = cm_id;
- cm_node->pdata.size = pdata_len;
-
- iwdev = to_iwdev(cm_id->device);
- if (!iwdev)
- return -EINVAL;
- cm_node->cm_core->stats_rejects++;
-
- if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
- return -EINVAL;
-
- if (loopback) {
- memcpy(&loopback->pdata_buf, pdata, pdata_len);
- loopback->pdata.size = pdata_len;
- }
-
- return i40iw_cm_reject(cm_node, pdata, pdata_len);
-}
-
-/**
- * i40iw_connect - registered call for connection to be established
- * @cm_id: cm information for active connection
- * @conn_param: Information about the connection
- */
-int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct ib_qp *ibqp;
- struct i40iw_qp *iwqp;
- struct i40iw_device *iwdev;
- struct i40iw_cm_node *cm_node;
- struct i40iw_cm_info cm_info;
- struct sockaddr_in *laddr;
- struct sockaddr_in *raddr;
- struct sockaddr_in6 *laddr6;
- struct sockaddr_in6 *raddr6;
- int ret = 0;
-
- ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
- if (!ibqp)
- return -EINVAL;
- iwqp = to_iwqp(ibqp);
- if (!iwqp)
- return -EINVAL;
- iwdev = to_iwdev(iwqp->ibqp.device);
- if (!iwdev)
- return -EINVAL;
-
- laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
- raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
-
- if (!(laddr->sin_port) || !(raddr->sin_port))
- return -EINVAL;
-
- iwqp->active_conn = 1;
- iwqp->cm_id = NULL;
- cm_id->provider_data = iwqp;
-
- /* set up the connection params for the node */
- if (cm_id->remote_addr.ss_family == AF_INET) {
- cm_info.ipv4 = true;
- memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
- memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
- cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
- cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
- cm_info.loc_port = ntohs(laddr->sin_port);
- cm_info.rem_port = ntohs(raddr->sin_port);
- cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
- } else {
- cm_info.ipv4 = false;
- i40iw_copy_ip_ntohl(cm_info.loc_addr,
- laddr6->sin6_addr.in6_u.u6_addr32);
- i40iw_copy_ip_ntohl(cm_info.rem_addr,
- raddr6->sin6_addr.in6_u.u6_addr32);
- cm_info.loc_port = ntohs(laddr6->sin6_port);
- cm_info.rem_port = ntohs(raddr6->sin6_port);
- i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);
- }
- cm_info.cm_id = cm_id;
- cm_info.tos = cm_id->tos;
- cm_info.user_pri = rt_tos2priority(cm_id->tos);
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
- __func__, cm_id->tos, cm_info.user_pri);
- cm_id->add_ref(cm_id);
- cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
- conn_param, &cm_info);
-
- if (IS_ERR(cm_node)) {
- ret = PTR_ERR(cm_node);
- cm_id->rem_ref(cm_id);
- return ret;
- }
-
- if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
- (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
- raddr6->sin6_addr.in6_u.u6_addr32,
- sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
- if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
- ret = -EINVAL;
- goto err;
- }
- cm_node->qhash_set = true;
- }
-
- if (i40iw_manage_apbvt(iwdev, cm_info.loc_port,
- I40IW_MANAGE_APBVT_ADD)) {
- ret = -EINVAL;
- goto err;
- }
-
- cm_node->apbvt_set = true;
- iwqp->cm_node = cm_node;
- cm_node->iwqp = iwqp;
- iwqp->cm_id = cm_id;
- i40iw_qp_add_ref(&iwqp->ibqp);
-
- if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
- cm_node->state = I40IW_CM_STATE_SYN_SENT;
- ret = i40iw_send_syn(cm_node, 0);
- if (ret)
- goto err;
- }
-
- if (cm_node->loopbackpartner) {
- cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
- i40iw_create_event(cm_node->loopbackpartner,
- I40IW_CM_EVENT_MPA_REQ);
- }
-
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_port,
- cm_node,
- cm_node->cm_id);
-
- return 0;
-
-err:
- if (cm_info.ipv4)
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI4",
- cm_info.rem_addr);
- else
- i40iw_debug(&iwdev->sc_dev,
- I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI6",
- cm_info.rem_addr);
-
- i40iw_rem_ref_cm_node(cm_node);
- cm_id->rem_ref(cm_id);
- iwdev->cm_core.stats_connect_errs++;
- return ret;
-}
-
-/**
- * i40iw_create_listen - registered call creating listener
- * @cm_id: cm information for passive connection
- * @backlog: max accept pending count
- */
-int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- struct i40iw_device *iwdev;
- struct i40iw_cm_listener *cm_listen_node;
- struct i40iw_cm_info cm_info;
- enum i40iw_status_code ret;
- struct sockaddr_in *laddr;
- struct sockaddr_in6 *laddr6;
- bool wildcard = false;
-
- iwdev = to_iwdev(cm_id->device);
- if (!iwdev)
- return -EINVAL;
-
- laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
- memset(&cm_info, 0, sizeof(cm_info));
- if (laddr->sin_family == AF_INET) {
- cm_info.ipv4 = true;
- cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
- cm_info.loc_port = ntohs(laddr->sin_port);
-
- if (laddr->sin_addr.s_addr != INADDR_ANY)
- cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
- else
- wildcard = true;
-
- } else {
- cm_info.ipv4 = false;
- i40iw_copy_ip_ntohl(cm_info.loc_addr,
- laddr6->sin6_addr.in6_u.u6_addr32);
- cm_info.loc_port = ntohs(laddr6->sin6_port);
- if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
- i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
- &cm_info.vlan_id);
- else
- wildcard = true;
- }
- cm_info.backlog = backlog;
- cm_info.cm_id = cm_id;
-
- cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
- if (!cm_listen_node) {
- i40iw_pr_err("cm_listen_node == NULL\n");
- return -ENOMEM;
- }
-
- cm_id->provider_data = cm_listen_node;
-
- cm_listen_node->tos = cm_id->tos;
- cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
- cm_info.user_pri = cm_listen_node->user_pri;
-
- if (!cm_listen_node->reused_node) {
- if (wildcard) {
- if (cm_info.ipv4)
- ret = i40iw_add_mqh_4(iwdev,
- &cm_info,
- cm_listen_node);
- else
- ret = i40iw_add_mqh_6(iwdev,
- &cm_info,
- cm_listen_node);
- if (ret)
- goto error;
-
- ret = i40iw_manage_apbvt(iwdev,
- cm_info.loc_port,
- I40IW_MANAGE_APBVT_ADD);
-
- if (ret)
- goto error;
- } else {
- ret = i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- NULL,
- true);
- if (ret)
- goto error;
- cm_listen_node->qhash_set = true;
- ret = i40iw_manage_apbvt(iwdev,
- cm_info.loc_port,
- I40IW_MANAGE_APBVT_ADD);
- if (ret)
- goto error;
- }
- }
- cm_id->add_ref(cm_id);
- cm_listen_node->cm_core->stats_listen_created++;
- return 0;
- error:
- i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
- return -EINVAL;
-}
-
-/**
- * i40iw_destroy_listen - registered call to destroy listener
- * @cm_id: cm information for passive connection
- */
-int i40iw_destroy_listen(struct iw_cm_id *cm_id)
-{
- struct i40iw_device *iwdev;
-
- iwdev = to_iwdev(cm_id->device);
- if (cm_id->provider_data)
- i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
- else
- i40iw_pr_err("cm_id->provider_data was NULL\n");
-
- cm_id->rem_ref(cm_id);
-
- return 0;
-}
-
-/**
- * i40iw_cm_event_connected - handle connected active node
- * @event: the info for cm_node of connection
- */
-static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
-{
- struct i40iw_qp *iwqp;
- struct i40iw_device *iwdev;
- struct i40iw_cm_core *cm_core;
- struct i40iw_cm_node *cm_node;
- struct i40iw_sc_dev *dev;
- struct ib_qp_attr attr;
- struct iw_cm_id *cm_id;
- unsigned long flags;
- int status;
- bool read0;
-
- cm_node = event->cm_node;
- cm_id = cm_node->cm_id;
- iwqp = (struct i40iw_qp *)cm_id->provider_data;
- iwdev = to_iwdev(iwqp->ibqp.device);
- dev = &iwdev->sc_dev;
- cm_core = &iwdev->cm_core;
-
- if (iwqp->destroyed) {
- status = -ETIMEDOUT;
- goto error;
- }
- i40iw_cm_init_tsa_conn(iwqp, cm_node);
- read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
- if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
- if (iwqp->page)
- kunmap(iwqp->page);
-
- memset(&attr, 0, sizeof(attr));
- attr.qp_state = IB_QPS_RTS;
- cm_node->qhash_set = false;
- i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
-
- cm_node->accelerated = true;
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_move_tail(&cm_node->list, &cm_core->accelerated_list);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
- 0);
- if (status)
- i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
-
- return;
-
-error:
- iwqp->cm_id = NULL;
- cm_id->provider_data = NULL;
- i40iw_send_cm_event(event->cm_node,
- cm_id,
- IW_CM_EVENT_CONNECT_REPLY,
- status);
- cm_id->rem_ref(cm_id);
- i40iw_rem_ref_cm_node(event->cm_node);
-}
-
-/**
- * i40iw_cm_event_reset - handle reset
- * @event: the info for cm_node of connection
- */
-static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
-{
- struct i40iw_cm_node *cm_node = event->cm_node;
- struct iw_cm_id *cm_id = cm_node->cm_id;
- struct i40iw_qp *iwqp;
-
- if (!cm_id)
- return;
-
- iwqp = cm_id->provider_data;
- if (!iwqp)
- return;
-
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "reset event %p - cm_id = %p\n",
- event->cm_node, cm_id);
- iwqp->cm_id = NULL;
-
- i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
- i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
-}
-
-/**
- * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
- * @work: pointer to cm event info
- */
-static void i40iw_cm_event_handler(struct work_struct *work)
-{
- struct i40iw_cm_event *event = container_of(work,
- struct i40iw_cm_event,
- event_work);
- struct i40iw_cm_node *cm_node;
-
- if (!event || !event->cm_node || !event->cm_node->cm_core)
- return;
-
- cm_node = event->cm_node;
-
- switch (event->type) {
- case I40IW_CM_EVENT_MPA_REQ:
- i40iw_send_cm_event(cm_node,
- cm_node->cm_id,
- IW_CM_EVENT_CONNECT_REQUEST,
- 0);
- break;
- case I40IW_CM_EVENT_RESET:
- i40iw_cm_event_reset(event);
- break;
- case I40IW_CM_EVENT_CONNECTED:
- if (!event->cm_node->cm_id ||
- (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
- break;
- i40iw_cm_event_connected(event);
- break;
- case I40IW_CM_EVENT_MPA_REJECT:
- if (!event->cm_node->cm_id ||
- (cm_node->state == I40IW_CM_STATE_OFFLOADED))
- break;
- i40iw_send_cm_event(cm_node,
- cm_node->cm_id,
- IW_CM_EVENT_CONNECT_REPLY,
- -ECONNREFUSED);
- break;
- case I40IW_CM_EVENT_ABORTED:
- if (!event->cm_node->cm_id ||
- (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
- break;
- i40iw_event_connect_error(event);
- break;
- default:
- i40iw_pr_err("event type = %d\n", event->type);
- break;
- }
-
- event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
- i40iw_rem_ref_cm_node(event->cm_node);
- kfree(event);
-}
-
-/**
- * i40iw_cm_post_event - queue event request for worker thread
- * @event: cm node's info for up event call
- */
-static void i40iw_cm_post_event(struct i40iw_cm_event *event)
-{
- atomic_inc(&event->cm_node->ref_count);
- event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
- INIT_WORK(&event->event_work, i40iw_cm_event_handler);
-
- queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
-}
-
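Both references taken here (the cm_node ref_count and the cm_id ref) are
dropped at the end of i40iw_cm_event_handler, keeping the objects alive for
as long as the queued work can still run. A minimal sketch of the
pin-before-handoff pattern with C11 atomics:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct node { atomic_int refs; };

    static void node_put(struct node *n)
    {
        if (atomic_fetch_sub(&n->refs, 1) == 1)
            free(n); /* last reference gone */
    }

    static void handler(struct node *n)
    {
        /* ... deliver event ... */
        node_put(n); /* drop the ref taken at post time */
    }

    int main(void)
    {
        struct node *n = malloc(sizeof(*n));

        atomic_init(&n->refs, 1);
        atomic_fetch_add(&n->refs, 1); /* pin before handing off */
        handler(n);                    /* async in the driver; inline here */
        node_put(n);                   /* creator's reference */
        return 0;
    }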
-/**
- * i40iw_qhash_ctrl - enable/disable qhash for list
- * @iwdev: device pointer
- * @parent_listen_node: parent listen node
- * @nfo: cm info node
- * @ipaddr: Pointer to IPv4 or IPv6 address
- * @ipv4: flag indicating IPv4 when true
- * @ifup: flag indicating interface up when true
- *
- * Enables or disables the qhash for the node in the child
- * listen list that matches ipaddr. If no matching IP was found
- * it will allocate and add a new child listen node to the
- * parent listen node. The listen_list_lock is assumed to be
- * held when called.
- */
-static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
- struct i40iw_cm_listener *parent_listen_node,
- struct i40iw_cm_info *nfo,
- u32 *ipaddr, bool ipv4, bool ifup)
-{
- struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
- struct i40iw_cm_listener *child_listen_node;
- struct list_head *pos, *tpos;
- enum i40iw_status_code ret;
- bool node_allocated = false;
- enum i40iw_quad_hash_manage_type op =
- ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
-
- list_for_each_safe(pos, tpos, child_listen_list) {
- child_listen_node =
- list_entry(pos,
- struct i40iw_cm_listener,
- child_listen_list);
- if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
- goto set_qhash;
- }
-
- /* if not found then add a child listener if interface is going up */
- if (!ifup)
- return;
- child_listen_node = kmemdup(parent_listen_node,
- sizeof(*child_listen_node), GFP_ATOMIC);
- if (!child_listen_node)
- return;
- node_allocated = true;
-
- memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
-
-set_qhash:
- memcpy(nfo->loc_addr,
- child_listen_node->loc_addr,
- sizeof(nfo->loc_addr));
- nfo->vlan_id = child_listen_node->vlan_id;
- ret = i40iw_manage_qhash(iwdev, nfo,
- I40IW_QHASH_TYPE_TCP_SYN,
- op,
- NULL, false);
- if (!ret) {
- child_listen_node->qhash_set = ifup;
- if (node_allocated)
- list_add(&child_listen_node->child_listen_list,
- &parent_listen_node->child_listen_list);
- } else if (node_allocated) {
- kfree(child_listen_node);
- }
-}
-
-/**
- * i40iw_cm_teardown_connections - teardown QPs
- * @iwdev: device pointer
- * @ipaddr: Pointer to IPv4 or IPv6 address
- * @nfo: cm info node
- * @disconnect_all: flag indicating disconnect all QPs
- *
- * teardown QPs where the source or destination addr matches ipaddr
- */
-void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
- struct i40iw_cm_info *nfo,
- bool disconnect_all)
-{
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- struct list_head *list_core_temp;
- struct list_head *list_node;
- struct i40iw_cm_node *cm_node;
- unsigned long flags;
- struct list_head teardown_list;
- struct ib_qp_attr attr;
-
- INIT_LIST_HEAD(&teardown_list);
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_safe(list_node, list_core_temp,
- &cm_core->accelerated_list) {
- cm_node = container_of(list_node, struct i40iw_cm_node, list);
- if (disconnect_all ||
- (nfo->vlan_id == cm_node->vlan_id &&
- (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
- !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->teardown_entry, &teardown_list);
- }
- }
- list_for_each_safe(list_node, list_core_temp,
- &cm_core->non_accelerated_list) {
- cm_node = container_of(list_node, struct i40iw_cm_node, list);
- if (disconnect_all ||
- (nfo->vlan_id == cm_node->vlan_id &&
- (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
- !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->teardown_entry, &teardown_list);
- }
- }
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
-
- list_for_each_safe(list_node, list_core_temp, &teardown_list) {
- cm_node = container_of(list_node, struct i40iw_cm_node,
- teardown_entry);
- attr.qp_state = IB_QPS_ERR;
- i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- if (iwdev->reset)
- i40iw_cm_disconn(cm_node->iwqp);
- i40iw_rem_ref_cm_node(cm_node);
- }
-}
-
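The teardown holds ht_lock only long enough to select matching nodes onto a
private teardown_list (pinning each with a reference), then drops the lock
before issuing the slow QP state changes. A user-space sketch of the
select-under-lock, process-outside pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int table[4] = { 1, 2, 3, 4 };

    int main(void)
    {
        int teardown[4], n = 0, i;

        pthread_mutex_lock(&lock); /* short critical section: select only */
        for (i = 0; i < 4; i++)
            if (table[i] % 2 == 0)
                teardown[n++] = table[i];
        pthread_mutex_unlock(&lock);

        for (i = 0; i < n; i++)    /* slow work done outside the lock */
            printf("tearing down %d\n", teardown[i]);
        return 0;
    }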
-/**
- * i40iw_if_notify - process an ifup/ifdown event on an interface
- * @iwdev: device pointer
- * @netdev: network interface device structure
- * @ipaddr: Pointer to IPv4 or IPv6 address
- * @ipv4: flag indicating IPv4 when true
- * @ifup: flag indicating interface up when true
- */
-void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
- u32 *ipaddr, bool ipv4, bool ifup)
-{
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- unsigned long flags;
- struct i40iw_cm_listener *listen_node;
- static const u32 ip_zero[4] = { 0, 0, 0, 0 };
- struct i40iw_cm_info nfo;
- u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
- enum i40iw_status_code ret;
- enum i40iw_quad_hash_manage_type op =
- ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
-
- nfo.vlan_id = vlan_id;
- nfo.ipv4 = ipv4;
-
- /* Disable or enable qhash for listeners */
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (vlan_id == listen_node->vlan_id &&
- (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
- !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
- memcpy(nfo.loc_addr, listen_node->loc_addr,
- sizeof(nfo.loc_addr));
- nfo.loc_port = listen_node->loc_port;
- nfo.user_pri = listen_node->user_pri;
- if (!list_empty(&listen_node->child_listen_list)) {
- i40iw_qhash_ctrl(iwdev,
- listen_node,
- &nfo,
- ipaddr, ipv4, ifup);
- } else if (memcmp(listen_node->loc_addr, ip_zero,
- ipv4 ? 4 : 16)) {
- ret = i40iw_manage_qhash(iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_SYN,
- op,
- NULL,
- false);
- if (!ret)
- listen_node->qhash_set = ifup;
- }
- }
- }
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
-
- /* teardown connected qp's on ifdown */
- if (!ifup)
- i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
deleted file mode 100644
index 6e43e4d730f4..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_CM_H
-#define I40IW_CM_H
-
-#define QUEUE_EVENTS
-
-#define I40IW_MANAGE_APBVT_DEL 0
-#define I40IW_MANAGE_APBVT_ADD 1
-
-#define I40IW_MPA_REQUEST_ACCEPT 1
-#define I40IW_MPA_REQUEST_REJECT 2
-
-/* IETF MPA -- defines, enums, structs */
-#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
-#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
-#define IETF_MPA_KEY_SIZE 16
-#define IETF_MPA_VERSION 1
-#define IETF_MAX_PRIV_DATA_LEN 512
-#define IETF_MPA_FRAME_SIZE 20
-#define IETF_RTR_MSG_SIZE 4
-#define IETF_MPA_V2_FLAG 0x10
-#define SNDMARKER_SEQNMASK 0x000001FF
-
-#define I40IW_MAX_IETF_SIZE 32
-
-/* IETF RTR MSG Fields */
-#define IETF_PEER_TO_PEER 0x8000
-#define IETF_FLPDU_ZERO_LEN 0x4000
-#define IETF_RDMA0_WRITE 0x8000
-#define IETF_RDMA0_READ 0x4000
-#define IETF_NO_IRD_ORD 0x3FFF
-
-/* HW-supported IRD sizes */
-#define I40IW_HW_IRD_SETTING_2 2
-#define I40IW_HW_IRD_SETTING_4 4
-#define I40IW_HW_IRD_SETTING_8 8
-#define I40IW_HW_IRD_SETTING_16 16
-#define I40IW_HW_IRD_SETTING_32 32
-#define I40IW_HW_IRD_SETTING_64 64
-
-#define MAX_PORTS 65536
-#define I40IW_VLAN_PRIO_SHIFT 13
-
-enum ietf_mpa_flags {
- IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
-	IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
- IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
-};
-
-struct ietf_mpa_v1 {
- u8 key[IETF_MPA_KEY_SIZE];
- u8 flags;
- u8 rev;
- __be16 priv_data_len;
- u8 priv_data[];
-};
-
-#define ietf_mpa_req_resp_frame ietf_mpa_frame
-
-struct ietf_rtr_msg {
- __be16 ctrl_ird;
- __be16 ctrl_ord;
-};
-
-struct ietf_mpa_v2 {
- u8 key[IETF_MPA_KEY_SIZE];
- u8 flags;
- u8 rev;
- __be16 priv_data_len;
- struct ietf_rtr_msg rtr_msg;
- u8 priv_data[];
-};
-
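Going by the RTR-message defines above, each ctrl word packs flag bits
(e.g. IETF_PEER_TO_PEER) into the high bits and a 14-bit IRD/ORD value
(masked by IETF_NO_IRD_ORD) below them, sent big-endian. A sketch of packing
one word under that reading; it illustrates the bit layout implied by the
defines, not the driver's actual frame builder:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IETF_PEER_TO_PEER 0x8000
    #define IETF_NO_IRD_ORD   0x3FFF

    int main(void)
    {
        uint16_t ird = 64;
        uint16_t ctrl = IETF_PEER_TO_PEER | (ird & IETF_NO_IRD_ORD);
        uint16_t ctrl_be = htons(ctrl); /* __be16 on the wire */

        printf("ctrl_ird=0x%04x (be=0x%04x)\n", ctrl, ntohs(ctrl_be));
        return 0;
    }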
-struct i40iw_cm_node;
-enum i40iw_timer_type {
- I40IW_TIMER_TYPE_SEND,
- I40IW_TIMER_TYPE_RECV,
- I40IW_TIMER_NODE_CLEANUP,
- I40IW_TIMER_TYPE_CLOSE,
-};
-
-#define I40IW_PASSIVE_STATE_INDICATED 0
-#define I40IW_DO_NOT_SEND_RESET_EVENT 1
-#define I40IW_SEND_RESET_EVENT 2
-
-#define MAX_I40IW_IFS 4
-
-#define SET_ACK 0x1
-#define SET_SYN 0x2
-#define SET_FIN 0x4
-#define SET_RST 0x8
-
-#define TCP_OPTIONS_PADDING 3
-
-struct option_base {
- u8 optionnum;
- u8 length;
-};
-
-enum option_numbers {
- OPTION_NUMBER_END,
- OPTION_NUMBER_NONE,
- OPTION_NUMBER_MSS,
- OPTION_NUMBER_WINDOW_SCALE,
- OPTION_NUMBER_SACK_PERM,
- OPTION_NUMBER_SACK,
- OPTION_NUMBER_WRITE0 = 0xbc
-};
-
-struct option_mss {
- u8 optionnum;
- u8 length;
- __be16 mss;
-};
-
-struct option_windowscale {
- u8 optionnum;
- u8 length;
- u8 shiftcount;
-};
-
-union all_known_options {
- char as_end;
- struct option_base as_base;
- struct option_mss as_mss;
- struct option_windowscale as_windowscale;
-};
-
-struct i40iw_timer_entry {
- struct list_head list;
- unsigned long timetosend; /* jiffies */
- struct i40iw_puda_buf *sqbuf;
- u32 type;
- u32 retrycount;
- u32 retranscount;
- u32 context;
- u32 send_retrans;
- int close_when_complete;
-};
-
-#define I40IW_DEFAULT_RETRYS 64
-#define I40IW_DEFAULT_RETRANS 8
-#define I40IW_DEFAULT_TTL 0x40
-#define I40IW_DEFAULT_RTT_VAR 0x6
-#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
-#define I40IW_DEFAULT_REXMIT_THRESH 8
-
-#define I40IW_RETRY_TIMEOUT HZ
-#define I40IW_SHORT_TIME 10
-#define I40IW_LONG_TIME (2 * HZ)
-#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))
-
-#define I40IW_CM_HASHTABLE_SIZE 1024
-#define I40IW_CM_TCP_TIMER_INTERVAL 3000
-#define I40IW_CM_DEFAULT_MTU 1540
-#define I40IW_CM_DEFAULT_FRAME_CNT 10
-#define I40IW_CM_THREAD_STACK_SIZE 256
-#define I40IW_CM_DEFAULT_RCV_WND 64240
-#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
-#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2
-#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A
-#define I40IW_CM_FREE_PKT_LO_WATERMARK 2
-
-#define I40IW_CM_DEFAULT_MSS 536
-
-#define I40IW_CM_DEF_SEQ 0x159bf75f
-#define I40IW_CM_DEF_LOCAL_ID 0x3b47
-
-#define I40IW_CM_DEF_SEQ2 0x18ed5740
-#define I40IW_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
-
-typedef u32 i40iw_addr_t;
-
-#define i40iw_cm_tsa_context i40iw_qp_context
-
-struct i40iw_qp;
-
-/* cm node transition states */
-enum i40iw_cm_node_state {
- I40IW_CM_STATE_UNKNOWN,
- I40IW_CM_STATE_INITED,
- I40IW_CM_STATE_LISTENING,
- I40IW_CM_STATE_SYN_RCVD,
- I40IW_CM_STATE_SYN_SENT,
- I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
- I40IW_CM_STATE_ESTABLISHED,
- I40IW_CM_STATE_ACCEPTING,
- I40IW_CM_STATE_MPAREQ_SENT,
- I40IW_CM_STATE_MPAREQ_RCVD,
- I40IW_CM_STATE_MPAREJ_RCVD,
- I40IW_CM_STATE_OFFLOADED,
- I40IW_CM_STATE_FIN_WAIT1,
- I40IW_CM_STATE_FIN_WAIT2,
- I40IW_CM_STATE_CLOSE_WAIT,
- I40IW_CM_STATE_TIME_WAIT,
- I40IW_CM_STATE_LAST_ACK,
- I40IW_CM_STATE_CLOSING,
- I40IW_CM_STATE_LISTENER_DESTROYED,
- I40IW_CM_STATE_CLOSED
-};
-
-enum mpa_frame_version {
- IETF_MPA_V1 = 1,
- IETF_MPA_V2 = 2
-};
-
-enum mpa_frame_key {
- MPA_KEY_REQUEST,
- MPA_KEY_REPLY
-};
-
-enum send_rdma0 {
- SEND_RDMA_READ_ZERO = 1,
- SEND_RDMA_WRITE_ZERO = 2
-};
-
-enum i40iw_tcpip_pkt_type {
- I40IW_PKT_TYPE_UNKNOWN,
- I40IW_PKT_TYPE_SYN,
- I40IW_PKT_TYPE_SYNACK,
- I40IW_PKT_TYPE_ACK,
- I40IW_PKT_TYPE_FIN,
- I40IW_PKT_TYPE_RST
-};
-
-/* CM context params */
-struct i40iw_cm_tcp_context {
- u8 client;
-
- u32 loc_seq_num;
- u32 loc_ack_num;
- u32 rem_ack_num;
- u32 rcv_nxt;
-
- u32 loc_id;
- u32 rem_id;
-
- u32 snd_wnd;
- u32 max_snd_wnd;
-
- u32 rcv_wnd;
- u32 mss;
- u8 snd_wscale;
- u8 rcv_wscale;
-};
-
-enum i40iw_cm_listener_state {
- I40IW_CM_LISTENER_PASSIVE_STATE = 1,
- I40IW_CM_LISTENER_ACTIVE_STATE = 2,
- I40IW_CM_LISTENER_EITHER_STATE = 3
-};
-
-struct i40iw_cm_listener {
- struct list_head list;
- struct i40iw_cm_core *cm_core;
- u8 loc_mac[ETH_ALEN];
- u32 loc_addr[4];
- u16 loc_port;
- struct iw_cm_id *cm_id;
- atomic_t ref_count;
- struct i40iw_device *iwdev;
- atomic_t pend_accepts_cnt;
- int backlog;
- enum i40iw_cm_listener_state listener_state;
- u32 reused_node;
- u8 user_pri;
- u8 tos;
- u16 vlan_id;
- bool qhash_set;
- bool ipv4;
- struct list_head child_listen_list;
-
-};
-
-struct i40iw_kmem_info {
- void *addr;
- u32 size;
-};
-
-/* per connection node and node state information */
-struct i40iw_cm_node {
- u32 loc_addr[4], rem_addr[4];
- u16 loc_port, rem_port;
- u16 vlan_id;
- enum i40iw_cm_node_state state;
- u8 loc_mac[ETH_ALEN];
- u8 rem_mac[ETH_ALEN];
- atomic_t ref_count;
- struct i40iw_qp *iwqp;
- struct i40iw_device *iwdev;
- struct i40iw_sc_dev *dev;
- struct i40iw_cm_tcp_context tcp_cntxt;
- struct i40iw_cm_core *cm_core;
- struct i40iw_cm_node *loopbackpartner;
- struct i40iw_timer_entry *send_entry;
- struct i40iw_timer_entry *close_entry;
- spinlock_t retrans_list_lock; /* cm transmit packet */
- enum send_rdma0 send_rdma0_op;
- u16 ird_size;
- u16 ord_size;
- u16 mpav2_ird_ord;
- struct iw_cm_id *cm_id;
- struct list_head list;
- bool accelerated;
- struct i40iw_cm_listener *listener;
- int apbvt_set;
- int accept_pend;
- struct list_head timer_entry;
- struct list_head reset_entry;
- struct list_head teardown_entry;
- atomic_t passive_state;
- bool qhash_set;
- u8 user_pri;
- u8 tos;
- bool ipv4;
- bool snd_mark_en;
- u16 lsmm_size;
- enum mpa_frame_version mpa_frame_rev;
- struct i40iw_kmem_info pdata;
- union {
- struct ietf_mpa_v1 mpa_frame;
- struct ietf_mpa_v2 mpa_v2_frame;
- };
-
- u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
- struct i40iw_kmem_info mpa_hdr;
- bool ack_rcvd;
-};
-
-/* structure for client or CM to fill when making CM api calls;
- * only the data relevant to the op needs to be set
- */
-struct i40iw_cm_info {
- struct iw_cm_id *cm_id;
- u16 loc_port;
- u16 rem_port;
- u32 loc_addr[4];
- u32 rem_addr[4];
- u16 vlan_id;
- int backlog;
- u8 user_pri;
- u8 tos;
- bool ipv4;
-};
-
-/* CM event codes */
-enum i40iw_cm_event_type {
- I40IW_CM_EVENT_UNKNOWN,
- I40IW_CM_EVENT_ESTABLISHED,
- I40IW_CM_EVENT_MPA_REQ,
- I40IW_CM_EVENT_MPA_CONNECT,
- I40IW_CM_EVENT_MPA_ACCEPT,
- I40IW_CM_EVENT_MPA_REJECT,
- I40IW_CM_EVENT_MPA_ESTABLISHED,
- I40IW_CM_EVENT_CONNECTED,
- I40IW_CM_EVENT_RESET,
- I40IW_CM_EVENT_ABORTED
-};
-
-/* event to post to CM event handler */
-struct i40iw_cm_event {
- enum i40iw_cm_event_type type;
- struct i40iw_cm_info cm_info;
- struct work_struct event_work;
- struct i40iw_cm_node *cm_node;
-};
-
-struct i40iw_cm_core {
- struct i40iw_device *iwdev;
- struct i40iw_sc_dev *dev;
-
- struct list_head listen_nodes;
- struct list_head accelerated_list;
- struct list_head non_accelerated_list;
-
- struct timer_list tcp_timer;
-
- struct workqueue_struct *event_wq;
- struct workqueue_struct *disconn_wq;
-
- spinlock_t ht_lock; /* manage hash table */
- spinlock_t listen_list_lock; /* listen list */
-	spinlock_t apbvt_lock; /* manage apbvt entries */
-
- unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];
-
- u64 stats_nodes_created;
- u64 stats_nodes_destroyed;
- u64 stats_listen_created;
- u64 stats_listen_destroyed;
- u64 stats_listen_nodes_created;
- u64 stats_listen_nodes_destroyed;
- u64 stats_loopbacks;
- u64 stats_accepts;
- u64 stats_rejects;
- u64 stats_connect_errs;
- u64 stats_passive_errs;
- u64 stats_pkt_retrans;
- u64 stats_backlog_drops;
-};
-
-int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
- struct i40iw_puda_buf *sqbuf,
- enum i40iw_timer_type type,
- int send_retrans,
- int close_when_complete);
-
-int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
-int i40iw_reject(struct iw_cm_id *, const void *, u8);
-int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
-int i40iw_create_listen(struct iw_cm_id *, int);
-int i40iw_destroy_listen(struct iw_cm_id *);
-
-int i40iw_cm_start(struct i40iw_device *);
-int i40iw_cm_stop(struct i40iw_device *);
-
-int i40iw_arp_table(struct i40iw_device *iwdev,
- u32 *ip_addr,
- bool ipv4,
- u8 *mac_addr,
- u32 action);
-
-void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
- u32 *ipaddr, bool ipv4, bool ifup);
-void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
- struct i40iw_cm_info *nfo,
- bool disconnect_all);
-bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);
-#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
deleted file mode 100644
index eaea5d545eb8..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ /dev/null
@@ -1,5243 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_status.h"
-#include "i40iw_hmc.h"
-
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include "i40iw_vf.h"
-#include "i40iw_virtchnl.h"
-
-/**
- * i40iw_insert_wqe_hdr - write wqe header
- * @wqe: cqp wqe for header
- * @header: header for the cqp wqe
- */
-void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
-{
- wmb(); /* make sure WQE is populated before polarity is set */
- set_64bit_val(wqe, 24, header);
-}
-
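The wmb() guarantees the WQE body is globally visible before the header
write that flips the valid/polarity bit and hands the descriptor to
hardware. In portable user-space code the same publish ordering is a C11
release store:

    #include <stdatomic.h>
    #include <stdint.h>

    struct wqe { uint64_t body[3]; _Atomic uint64_t header; };

    static void post_wqe(struct wqe *w, uint64_t hdr)
    {
        w->body[0] = 0xAA;
        w->body[1] = 0xBB;
        w->body[2] = 0xCC;
        /* release: body writes become visible before the header does */
        atomic_store_explicit(&w->header, hdr, memory_order_release);
    }

    int main(void)
    {
        struct wqe w = { 0 };

        post_wqe(&w, 1);
        return 0;
    }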
-void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
-{
- if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
- cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
- cqp_timeout->count = 0;
- } else {
- if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
- cqp_timeout->count++;
- }
-}
-
-/**
- * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
- * @cqp: struct for cqp hw
- * @val: cqp tail register value
- * @tail: wqtail register value
- * @error: cqp processing err
- */
-static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
- u32 *val,
- u32 *tail,
- u32 *error)
-{
- if (cqp->dev->is_pf) {
- *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
- *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
- *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
- } else {
- *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
- *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
- *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
- }
-}
-
-/**
- * i40iw_cqp_poll_registers - poll cqp registers
- * @cqp: struct for cqp hw
- * @tail: wqtail register value
- * @count: how many times to try for completion
- */
-static enum i40iw_status_code i40iw_cqp_poll_registers(
- struct i40iw_sc_cqp *cqp,
- u32 tail,
- u32 count)
-{
- u32 i = 0;
- u32 newtail, error, val;
-
- while (i < count) {
- i++;
- i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
- if (error) {
- error = (cqp->dev->is_pf) ?
- i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
- i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
- return I40IW_ERR_CQP_COMPL_ERROR;
- }
- if (newtail != tail) {
- /* SUCCESS */
- I40IW_RING_MOVE_TAIL(cqp->sq_ring);
- cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
- return 0;
- }
- udelay(I40IW_SLEEP_COUNT);
- }
- return I40IW_ERR_TIMEOUT;
-}
-
-/**
- * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
- * @buf: ptr to fpm commit buffer
- * @info: ptr to i40iw_hmc_obj_info struct
- * @sd: number of SDs for HMC objects
- *
- * parses fpm commit info and copies the base values
- * of hmc objects into hmc_info
- */
-static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
- u64 *buf,
- struct i40iw_hmc_obj_info *info,
- u32 *sd)
-{
- u64 temp;
- u64 size;
- u64 base = 0;
- u32 i, j;
- u32 k = 0;
-
- /* copy base values in obj_info */
- for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
- if ((i == I40IW_HMC_IW_SRQ) ||
- (i == I40IW_HMC_IW_FSIMC) ||
- (i == I40IW_HMC_IW_FSIAV)) {
- info[i].base = 0;
- info[i].cnt = 0;
- continue;
- }
- get_64bit_val(buf, j, &temp);
- info[i].base = RS_64_1(temp, 32) * 512;
- if (info[i].base > base) {
- base = info[i].base;
- k = i;
- }
- if (i == I40IW_HMC_IW_APBVT_ENTRY) {
- info[i].cnt = 1;
- continue;
- }
- if (i == I40IW_HMC_IW_QP)
- info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
- else if (i == I40IW_HMC_IW_CQ)
- info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
- else
- info[i].cnt = (u32)(temp);
- }
- size = info[k].cnt * info[k].size + info[k].base;
- if (size & 0x1FFFFF)
- *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
- else
- *sd = (u32)(size >> 21);
-
- return 0;
-}
-
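The closing arithmetic rounds the highest HMC object's end address up to
2MB segment-descriptor units; the `size & 0x1FFFFF` test plus `+ 1` is a
hand-rolled round-up division, equivalent to this:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sd_count(uint64_t size)
    {
        /* same as: (size & 0x1FFFFF) ? (size >> 21) + 1 : size >> 21 */
        return (uint32_t)((size + (1ull << 21) - 1) >> 21);
    }

    int main(void)
    {
        printf("%u %u %u\n", sd_count(0), sd_count(1), sd_count(1ull << 21));
        /* prints: 0 1 1 */
        return 0;
    }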
-/**
- * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
- * @buf: ptr to fpm query buffer
- * @buf_idx: index into buf
- * @obj_info: ptr to i40iw_hmc_obj_info struct
- * @rsrc_idx: resource index into info
- *
- * Decode a 64 bit value from fpm query buffer into max count and size
- */
-static u64 i40iw_sc_decode_fpm_query(u64 *buf,
- u32 buf_idx,
- struct i40iw_hmc_obj_info *obj_info,
- u32 rsrc_idx)
-{
- u64 temp;
- u32 size;
-
- get_64bit_val(buf, buf_idx, &temp);
- obj_info[rsrc_idx].max_cnt = (u32)temp;
- size = (u32)RS_64_1(temp, 32);
- obj_info[rsrc_idx].size = LS_64_1(1, size);
-
- return temp;
-}
-
-/**
- * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
- * @buf: ptr to fpm query buffer
- * @hmc_info: ptr to i40iw_hmc_obj_info struct
- * @hmc_fpm_misc: ptr to fpm data
- *
- * parses fpm query buffer and copies the max_cnt and
- * size values of hmc objects into hmc_info
- */
-static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
- u64 *buf,
- struct i40iw_hmc_info *hmc_info,
- struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
-{
- struct i40iw_hmc_obj_info *obj_info;
- u64 temp;
- u32 size;
- u16 max_pe_sds;
-
- obj_info = hmc_info->hmc_obj;
-
- get_64bit_val(buf, 0, &temp);
- hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
- max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
-
- /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
- if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
- max_pe_sds--;
- hmc_fpm_misc->max_sds = max_pe_sds;
- hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
-
- get_64bit_val(buf, 8, &temp);
- obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
- size = (u32)RS_64_1(temp, 32);
- obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
-
- get_64bit_val(buf, 16, &temp);
- obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
- size = (u32)RS_64_1(temp, 32);
- obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
-
- i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
- i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
-
- obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
- obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
- i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
- i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
-
- get_64bit_val(buf, 64, &temp);
- obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
- obj_info[I40IW_HMC_IW_XFFL].size = 4;
- hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
- if (!hmc_fpm_misc->xf_block_size)
- return I40IW_ERR_INVALID_SIZE;
-
- i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
-
- get_64bit_val(buf, 80, &temp);
- obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
- obj_info[I40IW_HMC_IW_Q1FL].size = 4;
- hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
- if (!hmc_fpm_misc->q1_block_size)
- return I40IW_ERR_INVALID_SIZE;
-
- i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
-
- get_64bit_val(buf, 112, &temp);
- obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
- obj_info[I40IW_HMC_IW_PBLE].size = 8;
-
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
- hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
- hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
-
- return 0;
-}
-
-/**
- * i40iw_fill_qos_list - Change all unknown qs handles to available ones
- * @qs_list: list of qs_handles to be fixed with valid qs_handles
- */
-static void i40iw_fill_qos_list(u16 *qs_list)
-{
- u16 qshandle = qs_list[0];
- int i;
-
- for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
- if (qs_list[i] == QS_HANDLE_UNKNOWN)
- qs_list[i] = qshandle;
- else
- qshandle = qs_list[i];
- }
-}
-
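The loop seeds from entry 0 and then replaces every unknown handle with the
most recent valid one seen, i.e. a fill-forward pass. A compact demo
(QS_HANDLE_UNKNOWN's real value is driver-defined; 0xFFFF is a stand-in):

    #include <stdio.h>

    #define UNKNOWN 0xFFFF

    int main(void)
    {
        unsigned short qs[6] = { 7, UNKNOWN, UNKNOWN, 9, UNKNOWN, 11 };
        unsigned short last = qs[0];
        int i;

        for (i = 0; i < 6; i++) {
            if (qs[i] == UNKNOWN)
                qs[i] = last; /* fill forward */
            else
                last = qs[i];
        }
        for (i = 0; i < 6; i++)
            printf("%u ", qs[i]); /* 7 7 7 9 9 11 */
        printf("\n");
        return 0;
    }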
-/**
- * i40iw_qp_from_entry - Given entry, get to the qp structure
- * @entry: Points to list of qp structure
- */
-static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
-{
- if (!entry)
- return NULL;
-
- return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
-}
-
-/**
- * i40iw_get_qp - get the next qp from the list given current qp
- * @head: Listhead of qp's
- * @qp: current qp
- */
-static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
-{
- struct list_head *entry = NULL;
- struct list_head *lastentry;
-
- if (list_empty(head))
- return NULL;
-
- if (!qp) {
- entry = head->next;
- } else {
- lastentry = &qp->list;
- entry = (lastentry != head) ? lastentry->next : NULL;
- }
-
- return i40iw_qp_from_entry(entry);
-}
-
-/**
- * i40iw_change_l2params - given the new l2 parameters, change all qp
- * @vsi: pointer to the vsi structure
- * @l2params: New paramaters from l2
- */
-void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
-{
- struct i40iw_sc_dev *dev = vsi->dev;
- struct i40iw_sc_qp *qp = NULL;
- bool qs_handle_change = false;
- unsigned long flags;
- u16 qs_handle;
- int i;
-
- if (vsi->mtu != l2params->mtu) {
- vsi->mtu = l2params->mtu;
- i40iw_reinitialize_ieq(dev);
- }
-
- i40iw_fill_qos_list(l2params->qs_handle_list);
- for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
- qs_handle = l2params->qs_handle_list[i];
- if (vsi->qos[i].qs_handle != qs_handle)
- qs_handle_change = true;
- spin_lock_irqsave(&vsi->qos[i].lock, flags);
- qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
- while (qp) {
- if (qs_handle_change) {
- qp->qs_handle = qs_handle;
- /* issue cqp suspend command */
- i40iw_qp_suspend_resume(dev, qp, true);
- }
- qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
- }
- spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
- vsi->qos[i].qs_handle = qs_handle;
- }
-}
-
-/**
- * i40iw_qp_rem_qos - remove qp from qos lists during qp destroy
- * @qp: qp to be removed from qos
- */
-void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
-{
- struct i40iw_sc_vsi *vsi = qp->vsi;
- unsigned long flags;
-
- if (!qp->on_qoslist)
- return;
- spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
- list_del(&qp->list);
- spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
-}
-
-/**
- * i40iw_qp_add_qos - called during setctx for qp to be added to qos
- * @qp: qp to be added to qos
- */
-void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
-{
- struct i40iw_sc_vsi *vsi = qp->vsi;
- unsigned long flags;
-
- if (qp->on_qoslist)
- return;
- spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
- qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
- list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
- qp->on_qoslist = true;
- spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
-}
-
-/**
- * i40iw_sc_pd_init - initialize sc pd struct
- * @dev: sc device struct
- * @pd: sc pd ptr
- * @pd_id: pd_id for allocated pd
- * @abi_ver: ABI version from user context, -1 if not valid
- */
-static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
- struct i40iw_sc_pd *pd,
- u16 pd_id,
- int abi_ver)
-{
- pd->size = sizeof(*pd);
- pd->pd_id = pd_id;
- pd->abi_ver = abi_ver;
- pd->dev = dev;
-}
-
-/**
- * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
- * @wqsize: size of the wq (sq, rq, srq) to be encoded
- * @cqpsq: true for the cqp sq, whose encoding starts one higher than other wqs
- */
-u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
-{
- u8 encoded_size = 0;
-
- /* cqp sq's hw coded value starts from 1 for size of 4
-	 * while it starts from 0 for a qp's wqs.
- */
- if (cqpsq)
- encoded_size = 1;
- wqsize >>= 2;
- while (wqsize >>= 1)
- encoded_size++;
- return encoded_size;
-}
-
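/* Worked example: the loop above computes log2(wqsize) - 2, plus one for
 * the CQP SQ. For wqsize = 2048 (2^11): 2048 >> 2 = 512, and nine
 * right-shifts reduce 512 to 1, so encoded_size is 9 for a QP WQ and 10
 * when cqpsq is true.
 */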
-/**
- * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
- * @cqp: IWARP control queue pair pointer
- * @info: IWARP control queue pair init info pointer
- *
- * Initializes the object and context buffers for a control Queue Pair.
- */
-static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
- struct i40iw_cqp_init_info *info)
-{
- u8 hw_sq_size;
-
- if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
- (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
- ((info->sq_size & (info->sq_size - 1))))
- return I40IW_ERR_INVALID_SIZE;
-
- hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
- cqp->size = sizeof(*cqp);
- cqp->sq_size = info->sq_size;
- cqp->hw_sq_size = hw_sq_size;
- cqp->sq_base = info->sq;
- cqp->host_ctx = info->host_ctx;
- cqp->sq_pa = info->sq_pa;
- cqp->host_ctx_pa = info->host_ctx_pa;
- cqp->dev = info->dev;
- cqp->struct_ver = info->struct_ver;
- cqp->scratch_array = info->scratch_array;
- cqp->polarity = 0;
- cqp->en_datacenter_tcp = info->en_datacenter_tcp;
- cqp->enabled_vf_count = info->enabled_vf_count;
- cqp->hmc_profile = info->hmc_profile;
- info->dev->cqp = cqp;
-
- I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
- cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
- cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
- INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
-
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
-
- i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
- "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
- __func__, cqp->sq_size, cqp->hw_sq_size,
- cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
- return 0;
-}
-
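/* Note: the (sq_size & (sq_size - 1)) test above is the usual
 * power-of-two check -- e.g. 2048 & 2047 == 0 passes, while
 * 1000 & 999 == 992 is rejected -- presumably so the ring macros can
 * rely on power-of-two wraparound arithmetic.
 */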
-/**
- * i40iw_sc_cqp_create - create cqp during bringup
- * @cqp: struct for cqp hw
- * @maj_err: If error, major err number
- * @min_err: If error, minor err number
- */
-static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
- u16 *maj_err,
- u16 *min_err)
-{
- u64 temp;
- u32 cnt = 0, p1, p2, val = 0, err_code;
- enum i40iw_status_code ret_code;
-
- *maj_err = 0;
- *min_err = 0;
-
- ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
- &cqp->sdbuf,
- I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
- I40IW_SD_BUF_ALIGNMENT);
-
- if (ret_code)
- goto exit;
-
- temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
- LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
-
- set_64bit_val(cqp->host_ctx, 0, temp);
- set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
- temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
- LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
- set_64bit_val(cqp->host_ctx, 16, temp);
- set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
- set_64bit_val(cqp->host_ctx, 32, 0);
- set_64bit_val(cqp->host_ctx, 40, 0);
- set_64bit_val(cqp->host_ctx, 48, 0);
- set_64bit_val(cqp->host_ctx, 56, 0);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
- cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
-
- p1 = RS_32_1(cqp->host_ctx_pa, 32);
- p2 = (u32)cqp->host_ctx_pa;
-
- if (cqp->dev->is_pf) {
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
- } else {
- i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
- i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
- }
- do {
- if (cnt++ > I40IW_DONE_COUNT) {
- i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
- ret_code = I40IW_ERR_TIMEOUT;
- /*
-			 * read the PFPE_CQPERRCODES register to get the minor
-			 * and major error codes
- */
- if (cqp->dev->is_pf)
- err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
- else
- err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
- *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
- *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
- goto exit;
- }
- udelay(I40IW_SLEEP_COUNT);
- if (cqp->dev->is_pf)
- val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
- else
- val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
- } while (!val);
-
-exit:
- if (!ret_code)
- cqp->process_cqp_sds = i40iw_update_sds_noccq;
- return ret_code;
-}
-
-/**
- * i40iw_sc_cqp_post_sq - ring the doorbell for cqp's sq
- * @cqp: struct for cqp hw
- */
-void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
-{
- if (cqp->dev->is_pf)
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
- else
- i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
-
- i40iw_debug(cqp->dev,
- I40IW_DEBUG_WQE,
- "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
- __func__,
- cqp->sq_ring.head,
- cqp->sq_ring.tail,
- cqp->sq_ring.size);
-}
-
-/**
- * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
- * @cqp: pointer to CQP structure
- * @scratch: private data for CQP WQE
- * @wqe_idx: WQE index for next WQE on CQP SQ
- */
-static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
- u64 scratch, u32 *wqe_idx)
-{
- u64 *wqe = NULL;
- enum i40iw_status_code ret_code;
-
- if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
- i40iw_debug(cqp->dev,
- I40IW_DEBUG_WQE,
- "%s: ring is full head %x tail %x size %x\n",
- __func__,
- cqp->sq_ring.head,
- cqp->sq_ring.tail,
- cqp->sq_ring.size);
- return NULL;
- }
- I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
- cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
- if (ret_code)
- return NULL;
- if (!*wqe_idx)
- cqp->polarity = !cqp->polarity;
-
- wqe = cqp->sq_base[*wqe_idx].elem;
- cqp->scratch_array[*wqe_idx] = scratch;
- I40IW_CQP_INIT_WQE(wqe);
-
- return wqe;
-}
-
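/* Sketch of the polarity scheme seen above (illustrative, assuming a
 * VALID bit in each WQE/CQE header): the producer stamps entries with
 * the current polarity and flips cqp->polarity on every wrap back to
 * index 0, so a consumer recognizes new entries without a shared count:
 *
 *	if (FIELD_GET(VALID_BIT, hdr) != expected_polarity)
 *		return I40IW_ERR_QUEUE_EMPTY;	(nothing new posted)
 *
 * which is exactly the check i40iw_sc_ccq_get_cqe_info performs below.
 */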
-/**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @scratch: private data for CQP WQE
- */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
-{
- u32 wqe_idx;
-
- return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
-}
-
-/**
- * i40iw_sc_cqp_destroy - destroy cqp during close
- * @cqp: struct for cqp hw
- */
-static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
-{
- u32 cnt = 0, val = 1;
- enum i40iw_status_code ret_code = 0;
- u32 cqpstat_addr;
-
- if (cqp->dev->is_pf) {
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
- i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
- cqpstat_addr = I40E_PFPE_CCQPSTATUS;
- } else {
- i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
- i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
- cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
- }
- do {
- if (cnt++ > I40IW_DONE_COUNT) {
- ret_code = I40IW_ERR_TIMEOUT;
- break;
- }
- udelay(I40IW_SLEEP_COUNT);
- val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
- } while (val);
-
- i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
- return ret_code;
-}
-
-/**
- * i40iw_sc_ccq_arm - enable intr for control cq
- * @ccq: ccq sc struct
- */
-static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
-{
- u64 temp_val;
- u16 sw_cq_sel;
- u8 arm_next_se;
- u8 arm_seq_num;
-
- /* write to cq doorbell shadow area */
- /* arm next se should always be zero */
- get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
-
- sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
- arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
-
- arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
- arm_seq_num++;
-
- temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
-
- set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
-
- wmb(); /* make sure shadow area is updated before arming */
-
- if (ccq->dev->is_pf)
- i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
- else
- i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
-}
-
-/**
- * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
- * @ccq: ccq sc struct
- * @info: completion q entry to return
- */
-static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
- struct i40iw_sc_cq *ccq,
- struct i40iw_ccq_cqe_info *info)
-{
- u64 qp_ctx, temp, temp1;
- u64 *cqe;
- struct i40iw_sc_cqp *cqp;
- u32 wqe_idx;
- u8 polarity;
- enum i40iw_status_code ret_code = 0;
-
- if (ccq->cq_uk.avoid_mem_cflct)
- cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
- else
- cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
-
- get_64bit_val(cqe, 24, &temp);
- polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
- if (polarity != ccq->cq_uk.polarity)
- return I40IW_ERR_QUEUE_EMPTY;
-
- get_64bit_val(cqe, 8, &qp_ctx);
- cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
- info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
- info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
-	if (info->error)
-		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
- wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
- info->scratch = cqp->scratch_array[wqe_idx];
-
- get_64bit_val(cqe, 16, &temp1);
- info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
- get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
- info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
- info->cqp = cqp;
-
- /* move the head for cq */
- I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
- if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
- ccq->cq_uk.polarity ^= 1;
-
- /* update cq tail in cq shadow memory also */
- I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
- set_64bit_val(ccq->cq_uk.shadow_area,
- 0,
- I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
- wmb(); /* write shadow area before tail */
- I40IW_RING_MOVE_TAIL(cqp->sq_ring);
- ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_poll_for_cqp_op_done - poll the ccq until the given cqp op completes
- * @cqp: struct for cqp hw
- * @op_code: cqp opcode for completion
- * @compl_info: completion q entry to return
- */
-static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
- struct i40iw_sc_cqp *cqp,
- u8 op_code,
- struct i40iw_ccq_cqe_info *compl_info)
-{
- struct i40iw_ccq_cqe_info info;
- struct i40iw_sc_cq *ccq;
- enum i40iw_status_code ret_code = 0;
- u32 cnt = 0;
-
- memset(&info, 0, sizeof(info));
- ccq = cqp->dev->ccq;
- while (1) {
- if (cnt++ > I40IW_DONE_COUNT)
- return I40IW_ERR_TIMEOUT;
-
- if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
- udelay(I40IW_SLEEP_COUNT);
- continue;
- }
-
- if (info.error) {
- ret_code = I40IW_ERR_CQP_COMPL_ERROR;
- break;
- }
-		/* log any opcode mismatch and keep polling for our opcode */
- if (op_code != info.op_code) {
- i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
- "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
- __func__, op_code, info.op_code);
- }
- /* success, exit out of the loop */
- if (op_code == info.op_code)
- break;
- }
-
- if (compl_info)
- memcpy(compl_info, &info, sizeof(*compl_info));
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_manage_hmc_pm_func_table - manage the hmc pm function table
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @vf_index: vf index for cqp
- * @free_pm_fcn: true to free the pm function
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 vf_index,
- bool free_pm_fcn,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- if (vf_index >= I40IW_MAX_VF_PER_PF)
- return I40IW_ERR_INVALID_VF_ID;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
- LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
- LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @hmc_profile_type: type of profile to set
- * @vf_num: vf number for profile
- * @post_sq: flag for cqp db to ring
- * @poll_registers: flag to poll register for cqp completion
- */
-static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 hmc_profile_type,
- u8 vf_num, bool post_sq,
- bool poll_registers)
-{
- u64 *wqe;
- u64 header;
- u32 val, tail, error;
- enum i40iw_status_code ret_code = 0;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16,
- (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
- LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
-
- header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
- if (error)
- return I40IW_ERR_CQP_COMPL_ERROR;
-
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
- if (poll_registers)
- ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
- else
- ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
- I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
- NULL);
- }
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
- * @cqp: struct for cqp hw
- */
-static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
-{
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
-}
-
-/**
- * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit
- * @cqp: struct for cqp hw
- */
-static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
-{
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
-}
-
-/**
- * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @hmc_fn_id: hmc function id
- * @commit_fpm_mem: Memory for fpm values
- * @post_sq: flag for cqp db to ring
- * @wait_type: poll ccq or cqp registers for cqp completion
- */
-static enum i40iw_status_code i40iw_sc_commit_fpm_values(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 hmc_fn_id,
- struct i40iw_dma_mem *commit_fpm_mem,
- bool post_sq,
- u8 wait_type)
-{
- u64 *wqe;
- u64 header;
- u32 tail, val, error;
- enum i40iw_status_code ret_code = 0;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, hmc_fn_id);
- set_64bit_val(wqe, 32, commit_fpm_mem->pa);
-
- header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
- if (error)
- return I40IW_ERR_CQP_COMPL_ERROR;
-
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
-
- if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
- ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
- else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
- ret_code = i40iw_sc_commit_fpm_values_done(cqp);
- }
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_query_rdma_features_done - poll cqp for query features done
- * @cqp: struct for cqp hw
- */
-static enum i40iw_status_code
-i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp)
-{
- return i40iw_sc_poll_for_cqp_op_done(
- cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL);
-}
-
-/**
- * i40iw_sc_query_rdma_features - query rdma features
- * @cqp: struct for cqp hw
- * @feat_mem: holds PA for HW to use
- * @scratch: u64 saved to be used during cqp completion
- */
-static enum i40iw_status_code
-i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
- struct i40iw_dma_mem *feat_mem, u64 scratch)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 32, feat_mem->pa);
-
- header = LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size;
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_sc_cqp_post_sq(cqp);
-
- return 0;
-}
-
-/**
- * i40iw_get_rdma_features - get RDMA features
- * @dev: sc device struct
- */
-enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
-{
- enum i40iw_status_code ret_code;
- struct i40iw_dma_mem feat_buf;
- u64 temp;
- u16 byte_idx, feat_type, feat_cnt;
-
- ret_code = i40iw_allocate_dma_mem(dev->hw, &feat_buf,
- I40IW_FEATURE_BUF_SIZE,
- I40IW_FEATURE_BUF_ALIGNMENT);
-
- if (ret_code)
- return I40IW_ERR_NO_MEMORY;
-
- ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = i40iw_sc_query_rdma_features_done(dev->cqp);
-
- if (ret_code)
- goto exit;
-
- get_64bit_val(feat_buf.va, 0, &temp);
- feat_cnt = RS_64(temp, I40IW_FEATURE_CNT);
- if (feat_cnt < I40IW_MAX_FEATURES) {
- ret_code = I40IW_ERR_INVALID_FEAT_CNT;
- goto exit;
- } else if (feat_cnt > I40IW_MAX_FEATURES) {
- i40iw_debug(dev, I40IW_DEBUG_CQP,
- "features buf size insufficient\n");
- }
-
- for (byte_idx = 0, feat_type = 0; feat_type < I40IW_MAX_FEATURES;
- feat_type++, byte_idx += 8) {
- get_64bit_val((u64 *)feat_buf.va, byte_idx, &temp);
- dev->feature_info[feat_type] = RS_64(temp, I40IW_FEATURE_INFO);
- }
-exit:
- i40iw_free_dma_mem(dev->hw, &feat_buf);
-
- return ret_code;
-}
-
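/* Note on the layout consumed above: the feature buffer is read as an
 * array of 64-bit words starting at offset 0. Word 0 carries the
 * feature count in its I40IW_FEATURE_CNT field, and the loop then
 * decodes one I40IW_FEATURE_INFO field per feature type at successive
 * 8-byte offsets into dev->feature_info[].
 */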
-/**
- * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
- * @cqp: struct for cqp hw
- */
-static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
-{
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
-}
-
-/**
- * i40iw_sc_query_fpm_values - cqp wqe query fpm values
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @hmc_fn_id: hmc function id
- * @query_fpm_mem: memory for return fpm values
- * @post_sq: flag for cqp db to ring
- * @wait_type: poll ccq or cqp registers for cqp completion
- */
-static enum i40iw_status_code i40iw_sc_query_fpm_values(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 hmc_fn_id,
- struct i40iw_dma_mem *query_fpm_mem,
- bool post_sq,
- u8 wait_type)
-{
- u64 *wqe;
- u64 header;
- u32 tail, val, error;
- enum i40iw_status_code ret_code = 0;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, hmc_fn_id);
- set_64bit_val(wqe, 32, query_fpm_mem->pa);
-
- header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- /* read the tail from CQP_TAIL register */
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
-
- if (error)
- return I40IW_ERR_CQP_COMPL_ERROR;
-
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
- if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
- ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
- else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
- ret_code = i40iw_sc_query_fpm_values_done(cqp);
- }
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
- * @cqp: struct for cqp hw
- * @info: arp entry information
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
- struct i40iw_sc_cqp *cqp,
- struct i40iw_add_arp_cache_entry_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 temp, header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 8, info->reach_max);
-
- temp = info->mac_addr[5] |
- LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) |
- LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) |
- LS_64_1(info->mac_addr[0], 40);
-
- set_64bit_val(wqe, 16, temp);
-
- header = info->arp_index |
- LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
- LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
- LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
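/* Worked example of the MAC packing above, using the hypothetical
 * address 00:11:22:33:44:55: mac_addr[0] lands in bits 47:40 and
 * mac_addr[5] in bits 7:0, so the quad-word written at offset 16 is
 * 0x0000001122334455.
 */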
-/**
- * i40iw_sc_del_arp_cache_entry - delete arp cache entry
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @arp_index: arp index to delete arp entry
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u16 arp_index,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- header = arp_index |
- LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @arp_index: arp index of the entry to query
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u16 arp_index,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- header = arp_index |
- LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
- * @cqp: struct for cqp hw
- * @info: info for apbvt entry to add or delete
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
- struct i40iw_sc_cqp *cqp,
- struct i40iw_apbvt_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, info->port);
-
- header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
- LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
- * @cqp: struct for cqp hw
- * @info: info for quad hash to manage
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- *
- * This is called before connection establishment is started. For passive
- * connections, when a listener is created, this routine is called with an entry
- * type of I40IW_QHASH_TYPE_TCP_SYN and the local ip address and tcp port. When a
- * SYN is received (passive connections) or sent (active connections), it is
- * called with an entry type of I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is
- * passed in info.
- *
- * Once the iwarp connection is established and its state moves to RTS, the quad
- * hash entry in the hardware points to iwarp's qp number and requires no
- * further calls from the driver.
- */
-static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
- struct i40iw_sc_cqp *cqp,
- struct i40iw_qhash_table_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 qw1 = 0;
- u64 qw2 = 0;
- u64 temp;
- struct i40iw_sc_vsi *vsi = info->vsi;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- temp = info->mac_addr[5] |
- LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) |
- LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) |
- LS_64_1(info->mac_addr[0], 40);
-
- set_64bit_val(wqe, 0, temp);
-
- qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
- LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
- if (info->ipv4_valid) {
- set_64bit_val(wqe,
- 48,
- LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
- } else {
- set_64bit_val(wqe,
- 56,
- LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
- LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
-
- set_64bit_val(wqe,
- 48,
- LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
- LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
- }
- qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
- if (info->vlan_valid)
- qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
- set_64bit_val(wqe, 16, qw2);
- if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
- qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
- if (!info->ipv4_valid) {
- set_64bit_val(wqe,
- 40,
- LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
- LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
- set_64bit_val(wqe,
- 32,
- LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
- LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
- } else {
- set_64bit_val(wqe,
- 32,
- LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
- }
- }
-
- set_64bit_val(wqe, 8, qw1);
- temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
- LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
- LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
- LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
- LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
- LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
-
- i40iw_insert_wqe_hdr(wqe, temp);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
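/* Hypothetical usage sketch for the passive-listener case described in
 * the comment above. Only fields the WQE builder actually consumes are
 * set; the concrete values, and the assumption that info.manage == 1
 * means "add", are illustrative rather than taken from the driver.
 */
static void example_add_syn_qhash(struct i40iw_sc_cqp *cqp,
				  struct i40iw_sc_vsi *vsi)
{
	struct i40iw_qhash_table_info info = {};

	info.vsi = vsi;
	info.entry_type = I40IW_QHASH_TYPE_TCP_SYN;
	info.manage = 1;		/* assumed: 1 == add entry */
	info.ipv4_valid = true;
	info.dest_ip[0] = 0xc0a80001;	/* listener ip 192.168.0.1 */
	info.dest_port = 4321;		/* listener tcp port */

	/* build the quad-hash WQE and ring the CQP doorbell */
	i40iw_sc_manage_qhash_table_entry(cqp, &info, 0, true);
}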
-/**
- * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
- * @cqp: struct for cqp hw
- * @info: mac addr info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
- struct i40iw_sc_cqp *cqp,
- struct i40iw_local_mac_ipaddr_entry_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 temp, header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- temp = info->mac_addr[5] |
- LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) |
- LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) |
- LS_64_1(info->mac_addr[0], 40);
-
- set_64bit_val(wqe, 32, temp);
-
- header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
- LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @entry_idx: index of mac entry
- * @ignore_ref_count: to force mac address delete
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 entry_idx,
- u8 ignore_ref_count,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
- LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
- LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_cqp_nop - send a nop wqe
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 header;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_ceq_init - initialize ceq
- * @ceq: ceq sc structure
- * @info: ceq initialization info
- */
-static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
- struct i40iw_ceq_init_info *info)
-{
- u32 pble_obj_cnt;
-
- if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
- (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
- return I40IW_ERR_INVALID_SIZE;
-
- if (info->ceq_id >= I40IW_MAX_CEQID)
- return I40IW_ERR_INVALID_CEQ_ID;
-
- pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- ceq->size = sizeof(*ceq);
- ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
- ceq->ceq_id = info->ceq_id;
- ceq->dev = info->dev;
- ceq->elem_cnt = info->elem_cnt;
- ceq->ceq_elem_pa = info->ceqe_pa;
- ceq->virtual_map = info->virtual_map;
-
- ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
- ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
- ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
-
- ceq->tph_en = info->tph_en;
- ceq->tph_val = info->tph_val;
- ceq->polarity = 1;
- I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
- ceq->dev->ceq[info->ceq_id] = ceq;
-
- return 0;
-}
-
-/**
- * i40iw_sc_ceq_create - create ceq wqe
- * @ceq: ceq sc structure
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
-
- cqp = ceq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, ceq->elem_cnt);
- set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
- set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
- set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
-
- header = ceq->ceq_id |
- LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
- LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
- LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
- LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
- * @ceq: ceq sc structure
- */
-static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
-{
- struct i40iw_sc_cqp *cqp;
-
- cqp = ceq->dev->cqp;
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
-}
-
-/**
- * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
- * @ceq: ceq sc structure
- */
-static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
-{
- struct i40iw_sc_cqp *cqp;
-
- cqp = ceq->dev->cqp;
- cqp->process_cqp_sds = i40iw_update_sds_noccq;
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
-}
-
-/**
- * i40iw_sc_cceq_create - create cceq
- * @ceq: ceq sc structure
- * @scratch: u64 saved to be used during cqp completion
- */
-static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
-{
- enum i40iw_status_code ret_code;
-
- ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
- if (!ret_code)
- ret_code = i40iw_sc_cceq_create_done(ceq);
- return ret_code;
-}
-
-/**
- * i40iw_sc_ceq_destroy - destroy ceq
- * @ceq: ceq sc structure
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
-
- cqp = ceq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, ceq->elem_cnt);
- set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
- header = ceq->ceq_id |
- LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
- LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
- LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
- LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_process_ceq - process ceq
- * @dev: sc device struct
- * @ceq: ceq sc structure
- */
-static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
-{
- u64 temp;
- u64 *ceqe;
- struct i40iw_sc_cq *cq = NULL;
- u8 polarity;
-
- ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
- get_64bit_val(ceqe, 0, &temp);
- polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
- if (polarity != ceq->polarity)
- return cq;
-
- cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
-
- I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
- if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
- ceq->polarity ^= 1;
-
- if (dev->is_pf)
- i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
- else
- i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
-
- return cq;
-}
-
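/* Note: the LS_64_1(temp, 1) above pairs with the CQ create WQEs later
 * in this file, which post the CQ pointer as RS_64_1(cq, 1), i.e.
 * pre-shifted right by one. Storing the pointer shifted frees one bit
 * of the event quad-word for the valid flag, and shifting left by one
 * here restores the original pointer.
 */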
-/**
- * i40iw_sc_aeq_init - initialize aeq
- * @aeq: aeq structure ptr
- * @info: aeq initialization info
- */
-static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
- struct i40iw_aeq_init_info *info)
-{
- u32 pble_obj_cnt;
-
- if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
- (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
- return I40IW_ERR_INVALID_SIZE;
- pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- aeq->size = sizeof(*aeq);
- aeq->polarity = 1;
- aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
- aeq->dev = info->dev;
- aeq->elem_cnt = info->elem_cnt;
-
- aeq->aeq_elem_pa = info->aeq_elem_pa;
- I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
-
- aeq->virtual_map = info->virtual_map;
- aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
- aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
- aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
- info->dev->aeq = aeq;
- return 0;
-}
-
-/**
- * i40iw_sc_aeq_create - create aeq
- * @aeq: aeq structure ptr
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- cqp = aeq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, aeq->elem_cnt);
- set_64bit_val(wqe, 32,
- (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
- set_64bit_val(wqe, 48,
- (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
-
- header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
- LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
- LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_aeq_destroy - destroy aeq during close
- * @aeq: aeq structure ptr
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- cqp = aeq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, aeq->elem_cnt);
- set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
- header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
- LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
- LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_get_next_aeqe - get next aeq entry
- * @aeq: aeq structure ptr
- * @info: aeqe info to be returned
- */
-static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
- struct i40iw_aeqe_info *info)
-{
- u64 temp, compl_ctx;
- u64 *aeqe;
- u16 wqe_idx;
- u8 ae_src;
- u8 polarity;
-
- aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
- get_64bit_val(aeqe, 0, &compl_ctx);
- get_64bit_val(aeqe, 8, &temp);
- polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
-
- if (aeq->polarity != polarity)
- return I40IW_ERR_QUEUE_EMPTY;
-
- i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
-
- ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
- wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
- info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
- info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
- info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
- info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
- info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
- info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
-
- switch (info->ae_id) {
- case I40IW_AE_PRIV_OPERATION_DENIED:
- case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case I40IW_AE_BAD_CLOSE:
- case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
- case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
- case I40IW_AE_STAG_ZERO_INVALID:
- case I40IW_AE_IB_RREQ_AND_Q1_FULL:
- case I40IW_AE_WQE_UNEXPECTED_OPCODE:
- case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
- case I40IW_AE_DDP_UBE_INVALID_MO:
- case I40IW_AE_DDP_UBE_INVALID_QN:
- case I40IW_AE_DDP_NO_L_BIT:
- case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
- case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
- case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
- case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- case I40IW_AE_INVALID_ARP_ENTRY:
- case I40IW_AE_INVALID_TCP_OPTION_RCVD:
- case I40IW_AE_STALE_ARP_ENTRY:
- case I40IW_AE_LLP_CLOSE_COMPLETE:
- case I40IW_AE_LLP_CONNECTION_RESET:
- case I40IW_AE_LLP_FIN_RECEIVED:
- case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
- case I40IW_AE_LLP_SYN_RECEIVED:
- case I40IW_AE_LLP_TERMINATE_RECEIVED:
- case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
- case I40IW_AE_RESET_SENT:
- case I40IW_AE_TERMINATE_SENT:
- case I40IW_AE_RESET_NOT_SENT:
- case I40IW_AE_LCE_QP_CATASTROPHIC:
- case I40IW_AE_QP_SUSPEND_COMPLETE:
- info->qp = true;
- info->compl_ctx = compl_ctx;
- ae_src = I40IW_AE_SOURCE_RSVD;
- break;
- case I40IW_AE_LCE_CQ_CATASTROPHIC:
- info->cq = true;
- info->compl_ctx = LS_64_1(compl_ctx, 1);
- ae_src = I40IW_AE_SOURCE_RSVD;
- break;
- }
-
- switch (ae_src) {
- case I40IW_AE_SOURCE_RQ:
- case I40IW_AE_SOURCE_RQ_0011:
- info->qp = true;
- info->wqe_idx = wqe_idx;
- info->compl_ctx = compl_ctx;
- break;
- case I40IW_AE_SOURCE_CQ:
- case I40IW_AE_SOURCE_CQ_0110:
- case I40IW_AE_SOURCE_CQ_1010:
- case I40IW_AE_SOURCE_CQ_1110:
- info->cq = true;
- info->compl_ctx = LS_64_1(compl_ctx, 1);
- break;
- case I40IW_AE_SOURCE_SQ:
- case I40IW_AE_SOURCE_SQ_0111:
- info->qp = true;
- info->sq = true;
- info->wqe_idx = wqe_idx;
- info->compl_ctx = compl_ctx;
- break;
- case I40IW_AE_SOURCE_IN_RR_WR:
- case I40IW_AE_SOURCE_IN_RR_WR_1011:
- info->qp = true;
- info->compl_ctx = compl_ctx;
- info->in_rdrsp_wr = true;
- break;
- case I40IW_AE_SOURCE_OUT_RR:
- case I40IW_AE_SOURCE_OUT_RR_1111:
- info->qp = true;
- info->compl_ctx = compl_ctx;
- info->out_rdrsp = true;
- break;
- case I40IW_AE_SOURCE_RSVD:
- default:
- break;
- }
- I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
- if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
- aeq->polarity ^= 1;
- return 0;
-}
-
-/**
- * i40iw_sc_repost_aeq_entries - repost completed aeq entries
- * @dev: sc device struct
- * @count: number of completed aeq entries to repost
- */
-static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
- u32 count)
-{
- if (dev->is_pf)
- i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
- else
- i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
-
- return 0;
-}
-
-/**
- * i40iw_sc_aeq_create_done - poll for aeq create to complete
- * @aeq: aeq structure ptr
- */
-static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
-{
- struct i40iw_sc_cqp *cqp;
-
- cqp = aeq->dev->cqp;
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
-}
-
-/**
- * i40iw_sc_aeq_destroy_done - poll for aeq destroy to complete during close
- * @aeq: aeq structure ptr
- */
-static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
-{
- struct i40iw_sc_cqp *cqp;
-
- cqp = aeq->dev->cqp;
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
-}
-
-/**
- * i40iw_sc_ccq_init - initialize control cq
- * @cq: sc's cq struct
- * @info: info for control cq initialization
- */
-static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
- struct i40iw_ccq_init_info *info)
-{
- u32 pble_obj_cnt;
-
- if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
- return I40IW_ERR_INVALID_SIZE;
-
- if (info->ceq_id > I40IW_MAX_CEQID)
- return I40IW_ERR_INVALID_CEQ_ID;
-
- pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- cq->cq_pa = info->cq_pa;
- cq->cq_uk.cq_base = info->cq_base;
- cq->shadow_area_pa = info->shadow_area_pa;
- cq->cq_uk.shadow_area = info->shadow_area;
- cq->shadow_read_threshold = info->shadow_read_threshold;
- cq->dev = info->dev;
- cq->ceq_id = info->ceq_id;
- cq->cq_uk.cq_size = info->num_elem;
- cq->cq_type = I40IW_CQ_TYPE_CQP;
- cq->ceqe_mask = info->ceqe_mask;
- I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
-
- cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
- cq->ceq_id_valid = info->ceq_id_valid;
- cq->tph_en = info->tph_en;
- cq->tph_val = info->tph_val;
- cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
-
- cq->pbl_list = info->pbl_list;
- cq->virtual_map = info->virtual_map;
- cq->pbl_chunk_size = info->pbl_chunk_size;
- cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
- cq->cq_uk.polarity = true;
-
- /* following are only for iw cqs so initialize them to zero */
- cq->cq_uk.cqe_alloc_reg = NULL;
- info->dev->ccq = cq;
- return 0;
-}
-
-/**
- * i40iw_sc_ccq_create_done - poll cqp for ccq create
- * @ccq: ccq sc struct
- */
-static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
-{
- struct i40iw_sc_cqp *cqp;
-
- cqp = ccq->dev->cqp;
- return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
-}
-
-/**
- * i40iw_sc_ccq_create - create control cq
- * @ccq: ccq sc struct
- * @scratch: u64 saved to be used during cqp completion
- * @check_overflow: overflow flag for ccq
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
- u64 scratch,
- bool check_overflow,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- enum i40iw_status_code ret_code;
-
- cqp = ccq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
- set_64bit_val(wqe, 16,
- LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
- set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
- set_64bit_val(wqe, 40, ccq->shadow_area_pa);
- set_64bit_val(wqe, 48,
- (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
- set_64bit_val(wqe, 56,
- LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
-
- header = ccq->cq_uk.cq_id |
- LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
- LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
- LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
- LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
- ret_code = i40iw_sc_ccq_create_done(ccq);
- if (ret_code)
- return ret_code;
- }
- cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
-
- return 0;
-}
-
-/**
- * i40iw_sc_ccq_destroy - destroy ccq during close
- * @ccq: ccq sc struct
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
- enum i40iw_status_code ret_code = 0;
- u32 tail, val, error;
-
- cqp = ccq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
- set_64bit_val(wqe, 40, ccq->shadow_area_pa);
-
- header = ccq->cq_uk.cq_id |
- LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
- LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
- if (error)
- return I40IW_ERR_CQP_COMPL_ERROR;
-
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
- ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
- }
-
- cqp->process_cqp_sds = i40iw_update_sds_noccq;
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_cq_init - initialize completion q
- * @cq: cq struct
- * @info: cq initialization info
- */
-static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
- struct i40iw_cq_init_info *info)
-{
- u32 __iomem *cqe_alloc_reg = NULL;
- enum i40iw_status_code ret_code;
- u32 pble_obj_cnt;
- u32 arm_offset;
-
- pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- cq->cq_pa = info->cq_base_pa;
- cq->dev = info->dev;
- cq->ceq_id = info->ceq_id;
- arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
- if (i40iw_get_hw_addr(cq->dev))
- cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
- arm_offset);
- info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
- ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
- if (ret_code)
- return ret_code;
- cq->virtual_map = info->virtual_map;
- cq->pbl_chunk_size = info->pbl_chunk_size;
- cq->ceqe_mask = info->ceqe_mask;
- cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
-
- cq->shadow_area_pa = info->shadow_area_pa;
- cq->shadow_read_threshold = info->shadow_read_threshold;
-
- cq->ceq_id_valid = info->ceq_id_valid;
- cq->tph_en = info->tph_en;
- cq->tph_val = info->tph_val;
-
- cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
-
- return 0;
-}
-
-/**
- * i40iw_sc_cq_create - create completion q
- * @cq: cq struct
- * @scratch: u64 saved to be used during cqp completion
- * @check_overflow: flag for overflow check
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
- u64 scratch,
- bool check_overflow,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
- return I40IW_ERR_INVALID_CQ_ID;
-
- if (cq->ceq_id > I40IW_MAX_CEQID)
- return I40IW_ERR_INVALID_CEQ_ID;
-
- cqp = cq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe,
- 16,
- LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
-
- set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
-
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
- set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
- set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
-
- header = cq->cq_uk.cq_id |
- LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
- LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
- LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_cq_destroy - destroy completion q
- * @cq: cq struct
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
-
- cqp = cq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
- set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
-
- header = cq->cq_uk.cq_id |
- LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
- LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
- LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_cq_modify - modify a Completion Queue
- * @cq: cq struct
- * @info: modification info struct
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag to post to sq
- */
-static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
- struct i40iw_modify_cq_info *info,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
- u32 cq_size, ceq_id, first_pm_pbl_idx;
- u8 pbl_chunk_size;
- bool virtual_map, ceq_id_valid, check_overflow;
- u32 pble_obj_cnt;
-
- if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
- return I40IW_ERR_INVALID_CEQ_ID;
-
- pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->cq_resize && info->virtual_map &&
- (info->first_pm_pbl_idx >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- cqp = cq->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- cq->pbl_list = info->pbl_list;
- cq->cq_pa = info->cq_pa;
- cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
-
- cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
- if (info->ceq_change) {
- ceq_id_valid = true;
- ceq_id = info->ceq_id;
- } else {
- ceq_id_valid = cq->ceq_id_valid;
- ceq_id = ceq_id_valid ? cq->ceq_id : 0;
- }
- virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
- first_pm_pbl_idx = (info->cq_resize ?
- (info->virtual_map ? info->first_pm_pbl_idx : 0) :
- (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
- pbl_chunk_size = (info->cq_resize ?
- (info->virtual_map ? info->pbl_chunk_size : 0) :
- (cq->virtual_map ? cq->pbl_chunk_size : 0));
- check_overflow = info->check_overflow_change ? info->check_overflow :
- cq->check_overflow;
- cq->cq_uk.cq_size = cq_size;
- cq->ceq_id_valid = ceq_id_valid;
- cq->ceq_id = ceq_id;
- cq->virtual_map = virtual_map;
- cq->first_pm_pbl_idx = first_pm_pbl_idx;
- cq->pbl_chunk_size = pbl_chunk_size;
- cq->check_overflow = check_overflow;
-
- set_64bit_val(wqe, 0, cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe, 16,
- LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
- set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
- set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
- set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
-
- header = cq->cq_uk.cq_id |
- LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
- LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
- LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
- LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_qp_init - initialize qp
- * @qp: sc qp
- * @info: initialization qp info
- */
-static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
- struct i40iw_qp_init_info *info)
-{
- u32 __iomem *wqe_alloc_reg = NULL;
- enum i40iw_status_code ret_code;
- u32 pble_obj_cnt;
- u8 wqe_size;
- u32 offset;
-
- qp->dev = info->pd->dev;
- qp->vsi = info->vsi;
- qp->sq_pa = info->sq_pa;
- qp->rq_pa = info->rq_pa;
- qp->hw_host_ctx_pa = info->host_ctx_pa;
- qp->q2_pa = info->q2_pa;
- qp->shadow_area_pa = info->shadow_area_pa;
-
- qp->q2_buf = info->q2;
- qp->pd = info->pd;
- qp->hw_host_ctx = info->host_ctx;
- offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
- if (i40iw_get_hw_addr(qp->pd->dev))
- wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
- offset);
-
- info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
- info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
- ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
- if (ret_code)
- return ret_code;
- qp->virtual_map = info->virtual_map;
-
- pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
- (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- qp->llp_stream_handle = (void *)(-1);
- qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
-
- qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
- false);
- i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
- __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
-
- switch (qp->pd->abi_ver) {
- case 4:
- ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
- &wqe_size);
- if (ret_code)
- return ret_code;
- break;
- case 5: /* fallthrough until next ABI version */
- default:
- if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
- return I40IW_ERR_INVALID_FRAG_COUNT;
- wqe_size = I40IW_MAX_WQE_SIZE_RQ;
- break;
- }
- qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
- (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
- i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
- "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
- __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
- qp->sq_tph_val = info->sq_tph_val;
- qp->rq_tph_val = info->rq_tph_val;
- qp->sq_tph_en = info->sq_tph_en;
- qp->rq_tph_en = info->rq_tph_en;
- qp->rcv_tph_en = info->rcv_tph_en;
- qp->xmit_tph_en = info->xmit_tph_en;
- qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
-
- return 0;
-}
-
-/**
- * i40iw_sc_qp_create - create qp
- * @qp: sc qp
- * @info: qp create info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_qp_create(
- struct i40iw_sc_qp *qp,
- struct i40iw_create_qp_info *info,
- u64 scratch,
- bool post_sq)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
-
- if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
- (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
- return I40IW_ERR_INVALID_QP_ID;
-
- cqp = qp->pd->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
-
- set_64bit_val(wqe, 40, qp->shadow_area_pa);
-
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
- LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
- LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
- LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
- LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
- LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_qp_modify - modify qp cqp wqe
- * @qp: sc qp
- * @info: modify qp info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_qp_modify(
- struct i40iw_sc_qp *qp,
- struct i40iw_modify_qp_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- u8 term_actions = 0;
- u8 term_len = 0;
-
- cqp = qp->pd->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
- if (info->dont_send_fin)
- term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
- if (info->dont_send_term)
- term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
- if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
- (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
- term_len = info->termlen;
- }
-
- set_64bit_val(wqe,
- 8,
- LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
-
- set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
- set_64bit_val(wqe, 40, qp->shadow_area_pa);
-
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
- LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
- LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
- LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
- LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
- LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
- LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
- LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
- LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
- LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
- LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
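The terminate bookkeeping in i40iw_sc_qp_modify above is easy to misread, so here is a minimal standalone sketch of how term_actions accumulates and when a terminate length is carried. The enum values below are hypothetical placeholders chosen only so that TERM_ONLY + FIN_ONLY equals TERM_AND_FIN, as the driver's arithmetic requires; the real encodings live in the i40iw headers and are not reproduced here.

#include <stdio.h>

/* Hypothetical values; only the sum relationship matters here. */
enum { TERM_SEND_TERM_ONLY = 1, TERM_SEND_FIN_ONLY = 2,
       TERM_SEND_TERM_AND_FIN = 3 };

static int term_len_for(int dont_send_fin, int dont_send_term, int termlen)
{
	int term_actions = 0;

	if (dont_send_fin)
		term_actions += TERM_SEND_TERM_ONLY;
	if (dont_send_term)
		term_actions += TERM_SEND_FIN_ONLY;
	/* a terminate length is carried only when a TERM is sent */
	if (term_actions == TERM_SEND_TERM_AND_FIN ||
	    term_actions == TERM_SEND_TERM_ONLY)
		return termlen;
	return 0;
}

int main(void)
{
	printf("%d\n", term_len_for(1, 0, 32));	/* TERM only -> 32 */
	printf("%d\n", term_len_for(0, 1, 32));	/* FIN only  -> 0  */
	return 0;
}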
-/**
- * i40iw_sc_qp_destroy - cqp destroy qp
- * @qp: sc qp
- * @scratch: u64 saved to be used during cqp completion
- * @remove_hash_idx: flag indicating whether to remove the hash idx
- * @ignore_mw_bnd: memory window bind flag
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_qp_destroy(
- struct i40iw_sc_qp *qp,
- u64 scratch,
- bool remove_hash_idx,
- bool ignore_mw_bnd,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- i40iw_qp_rem_qos(qp);
- cqp = qp->pd->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
- set_64bit_val(wqe, 40, qp->shadow_area_pa);
-
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
- LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
- LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_qp_flush_wqes - flush qp's wqe
- * @qp: sc qp
- * @info: flush information
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
- struct i40iw_sc_qp *qp,
- struct i40iw_qp_flush_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 temp = 0;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- bool flush_sq = false, flush_rq = false;
-
- if (info->rq && !qp->flush_rq)
- flush_rq = true;
-
- if (info->sq && !qp->flush_sq)
- flush_sq = true;
-
- qp->flush_sq |= flush_sq;
- qp->flush_rq |= flush_rq;
- if (!flush_sq && !flush_rq)
- return 0;
-
- cqp = qp->pd->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- if (info->userflushcode) {
- if (flush_rq) {
- temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
- LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
- }
- if (flush_sq) {
- temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
- LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
- }
- }
- set_64bit_val(wqe, 16, temp);
-
- temp = (info->generate_ae) ?
- info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
-
- set_64bit_val(wqe, 8, temp);
-
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
- LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
- LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
- LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
- LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_gen_ae - generate AE, currently uses flush WQE CQP OP
- * @qp: sc qp
- * @info: gen ae information
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_gen_ae(
- struct i40iw_sc_qp *qp,
- struct i40iw_gen_ae_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 temp;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- cqp = qp->pd->dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- temp = info->ae_code |
- LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE);
-
- set_64bit_val(wqe, 8, temp);
-
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_GEN_AE, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "GEN_AE WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_qp_upload_context - upload qp's context
- * @dev: sc device struct
- * @info: upload context info ptr for return
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_qp_upload_context(
- struct i40iw_sc_dev *dev,
- struct i40iw_upload_context_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 16, info->buf_pa);
-
- header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
- LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
- LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
- LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
- LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_qp_setctx - set qp's context
- * @qp: sc qp
- * @qp_ctx: context ptr
- * @info: ctx info
- */
-static enum i40iw_status_code i40iw_sc_qp_setctx(
- struct i40iw_sc_qp *qp,
- u64 *qp_ctx,
- struct i40iw_qp_host_ctx_info *info)
-{
- struct i40iwarp_offload_info *iw;
- struct i40iw_tcp_offload_info *tcp;
- struct i40iw_sc_vsi *vsi;
- struct i40iw_sc_dev *dev;
- u64 qw0, qw3, qw7 = 0;
-
- iw = info->iwarp_info;
- tcp = info->tcp_info;
- vsi = qp->vsi;
- dev = qp->dev;
- if (info->add_to_qoslist) {
- qp->user_pri = info->user_pri;
- i40iw_qp_add_qos(qp);
- i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
- __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
- }
- qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
- LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
- LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
- LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
- LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
- LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN);
-
- set_64bit_val(qp_ctx, 8, qp->sq_pa);
- set_64bit_val(qp_ctx, 16, qp->rq_pa);
-
- qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
- LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
- LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
-
- set_64bit_val(qp_ctx,
- 128,
- LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
-
- set_64bit_val(qp_ctx,
- 136,
- LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
- LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
-
- set_64bit_val(qp_ctx,
- 168,
- LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
- set_64bit_val(qp_ctx,
- 176,
- LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
- LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
- LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
- LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
-
- if (info->iwarp_info_valid) {
- qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
- LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
-
- qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
- set_64bit_val(qp_ctx,
- 144,
- LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
- LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
- set_64bit_val(qp_ctx,
- 152,
- LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
-
- set_64bit_val(qp_ctx,
- 160,
- LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
- LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
- LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
- LS_64(iw->rd_enable, I40IWQPC_RDOK) |
- LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
- LS_64(iw->bind_en, I40IWQPC_BINDEN) |
- LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
- LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
- LS_64((((vsi->stats_fcn_id_alloc) &&
- (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
- I40IWQPC_USESTATSINSTANCE) |
- LS_64(1, I40IWQPC_IWARPMODE) |
- LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
- LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
- LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
- LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
- LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
- }
- if (info->tcp_info_valid) {
- qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
- LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
- LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
- LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
- LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
- LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
- LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
-
- qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
- LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
- LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
- LS_64(tcp->tos, I40IWQPC_TOS) |
- LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
- LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
-
- qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
- set_64bit_val(qp_ctx,
- 32,
- LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
- LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
-
- set_64bit_val(qp_ctx,
- 40,
- LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
- LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
-
- set_64bit_val(qp_ctx,
- 48,
- LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
- LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
- LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
-
- qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
- LS_64(tcp->wscale, I40IWQPC_WSCALE) |
- LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
- LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
- LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
- LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
- LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
-
- set_64bit_val(qp_ctx,
- 72,
- LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
- LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
- set_64bit_val(qp_ctx,
- 80,
- LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
- LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
-
- set_64bit_val(qp_ctx,
- 88,
- LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
- LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
- set_64bit_val(qp_ctx,
- 96,
- LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
- LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
- set_64bit_val(qp_ctx,
- 104,
- LS_64(tcp->srtt, I40IWQPC_SRTT) |
- LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
- set_64bit_val(qp_ctx,
- 112,
- LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
- LS_64(tcp->cwnd, I40IWQPC_CWND));
- set_64bit_val(qp_ctx,
- 120,
- LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
- LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
- set_64bit_val(qp_ctx,
- 128,
- LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
- LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
- set_64bit_val(qp_ctx,
- 184,
- LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
- LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
- set_64bit_val(qp_ctx,
- 192,
- LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
- LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
- }
-
- set_64bit_val(qp_ctx, 0, qw0);
- set_64bit_val(qp_ctx, 24, qw3);
- set_64bit_val(qp_ctx, 56, qw7);
-
-	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
- qp_ctx, I40IW_QP_CTX_SIZE);
- return 0;
-}
-
-/**
- * i40iw_sc_alloc_stag - mr stag alloc
- * @dev: sc device struct
- * @info: stag info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_alloc_stag(
- struct i40iw_sc_dev *dev,
- struct i40iw_allocate_stag_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- enum i40iw_page_size page_size;
-
- page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe,
- 8,
- LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
- LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
- set_64bit_val(wqe,
- 16,
- LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
- set_64bit_val(wqe,
- 40,
- LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
-
- header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_STAG_MR) |
- LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
- LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
- LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
- LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
- LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_mr_reg_non_shared - non-shared mr registration
- * @dev: sc device struct
- * @info: mr info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
- struct i40iw_sc_dev *dev,
- struct i40iw_reg_ns_stag_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 temp;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- u32 pble_obj_cnt;
- bool remote_access;
- u8 addr_type;
- enum i40iw_page_size page_size;
-
- page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
- if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
- I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
- remote_access = true;
- else
- remote_access = false;
-
- pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
-
- if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
- return I40IW_ERR_INVALID_PBLE_INDEX;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
- set_64bit_val(wqe, 0, temp);
-
- set_64bit_val(wqe,
- 8,
- LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
- LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
-
- set_64bit_val(wqe,
- 16,
- LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
- LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
- if (!info->chunk_size) {
- set_64bit_val(wqe, 32, info->reg_addr_pa);
- set_64bit_val(wqe, 48, 0);
- } else {
- set_64bit_val(wqe, 32, 0);
- set_64bit_val(wqe, 48, info->first_pm_pbl_index);
- }
- set_64bit_val(wqe, 40, info->hmc_fcn_index);
- set_64bit_val(wqe, 56, 0);
-
- addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
- header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_STAG_MR) |
- LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
- LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
- LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
- LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
- LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
- LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_mr_reg_shared - register shared memory region
- * @dev: sc device struct
- * @info: info for shared memory registration
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_mr_reg_shared(
- struct i40iw_sc_dev *dev,
- struct i40iw_register_shared_stag *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 temp, va64, fbo, header;
- u32 va32;
- bool remote_access;
- u8 addr_type;
-
- if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
- I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
- remote_access = true;
- else
- remote_access = false;
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- va64 = (uintptr_t)(info->va);
- va32 = (u32)(va64 & 0x00000000FFFFFFFF);
- fbo = (u64)(va32 & (4096 - 1));
-
- set_64bit_val(wqe,
- 0,
- (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
-
- set_64bit_val(wqe,
- 8,
- LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
- temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
- LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
- LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
- set_64bit_val(wqe, 16, temp);
-
- addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
- header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_STAG_MR) |
- LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
- LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
- LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
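For reference, the fbo computed in i40iw_sc_mr_reg_shared above is simply the VA's offset within its 4 KiB page, taken from the low 32 bits of the address. A standalone sketch of that arithmetic (the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t va64 = 0x00007f3a12345abcULL;	/* arbitrary example VA */
	uint32_t va32 = (uint32_t)(va64 & 0x00000000FFFFFFFFULL);
	uint64_t fbo = (uint64_t)(va32 & (4096 - 1));

	/* prints fbo = 0xabc: the offset within the 4 KiB page */
	printf("fbo = 0x%llx\n", (unsigned long long)fbo);
	return 0;
}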
-/**
- * i40iw_sc_dealloc_stag - deallocate stag
- * @dev: sc device struct
- * @info: dealloc stag info
- * @scratch: u64 saved to be used during cqp completion
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_dealloc_stag(
- struct i40iw_sc_dev *dev,
- struct i40iw_dealloc_stag_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 header;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe,
- 8,
- LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
- set_64bit_val(wqe,
- 16,
- LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
-
- header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
- LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_query_stag - query hardware for stag
- * @dev: sc device struct
- * @scratch: u64 saved to be used during cqp completion
- * @stag_index: stag index for query
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
- u64 scratch,
- u32 stag_index,
- bool post_sq)
-{
- u64 header;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe,
- 16,
- LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
-
- header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_mw_alloc - mw allocate
- * @dev: sc device struct
- * @scratch: u64 saved to be used during cqp completion
- * @mw_stag_index: stag index
- * @pd_id: pd id for this mw
- * @post_sq: flag for cqp db to ring
- */
-static enum i40iw_status_code i40iw_sc_mw_alloc(
- struct i40iw_sc_dev *dev,
- u64 scratch,
- u32 mw_stag_index,
- u16 pd_id,
- bool post_sq)
-{
- u64 header;
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
- set_64bit_val(wqe,
- 16,
- LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
-
- header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
- * @qp: sc qp struct
- * @info: fast mr info
- * @post_sq: flag for cqp db to ring
- */
-enum i40iw_status_code i40iw_sc_mr_fast_register(
- struct i40iw_sc_qp *qp,
- struct i40iw_fast_reg_stag_info *info,
- bool post_sq)
-{
- u64 temp, header;
- u64 *wqe;
- u32 wqe_idx;
- enum i40iw_page_size page_size;
-
- page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
- wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
- 0, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
- __func__, info->wr_id, wqe_idx,
- &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
- temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
- set_64bit_val(wqe, 0, temp);
-
- temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
- set_64bit_val(wqe,
- 8,
- LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
- LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
-
- set_64bit_val(wqe,
- 16,
- info->total_len |
- LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
-
- header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
- LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
- LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
- LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
- LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
- LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
- LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
- LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
- LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
- wqe, I40IW_QP_WQE_MIN_SIZE);
-
- if (post_sq)
- i40iw_qp_post_wr(&qp->qp_uk);
- return 0;
-}
-
-/**
- * i40iw_sc_send_lsmm - send last streaming mode message
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- * @stag: stag of lsmm buffer
- */
-static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
- void *lsmm_buf,
- u32 size,
- i40iw_stag stag)
-{
- u64 *wqe;
- u64 header;
- struct i40iw_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
-
- set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
-
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
- LS_64(1, I40IWQPSQ_STREAMMODE) |
- LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
- wqe, I40IW_QP_WQE_MIN_SIZE);
-}
-
-/**
- * i40iw_sc_send_lsmm_nostag - send last streaming mode message for privileged qp
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- */
-static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
- void *lsmm_buf,
- u32 size)
-{
- u64 *wqe;
- u64 header;
- struct i40iw_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
-
- set_64bit_val(wqe, 8, size);
-
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
- LS_64(1, I40IWQPSQ_STREAMMODE) |
- LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
- wqe, I40IW_QP_WQE_MIN_SIZE);
-}
-
-/**
- * i40iw_sc_send_rtt - send last read0 or write0
- * @qp: sc qp struct
- * @read: Do read0 or write0
- */
-static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
-{
- u64 *wqe;
- u64 header;
- struct i40iw_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
- if (read) {
- header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
- LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
- } else {
- header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
- }
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
- wqe, I40IW_QP_WQE_MIN_SIZE);
-}
-
-/**
- * i40iw_sc_post_wqe0 - send wqe with opcode
- * @qp: sc qp struct
- * @opcode: opcode to use for wqe0
- */
-static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
-{
- u64 *wqe;
- u64 header;
- struct i40iw_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- switch (opcode) {
- case I40IWQP_OP_NOP:
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
- break;
- case I40IWQP_OP_RDMA_SEND:
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
- header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
- LS_64(1, I40IWQPSQ_STREAMMODE) |
- LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
-
- i40iw_insert_wqe_hdr(wqe, header);
- break;
- default:
- i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
- __func__);
- break;
- }
- return 0;
-}
-
-/**
- * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
- * @dev: ptr to i40iw_dev struct
- * @hmc_fn_id: hmc function id
- */
-enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
-{
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_dma_mem query_fpm_mem;
- struct i40iw_virt_mem virt_mem;
- struct i40iw_vfdev *vf_dev = NULL;
- u32 mem_size;
- enum i40iw_status_code ret_code = 0;
- bool poll_registers = true;
- u16 iw_vf_idx;
- u8 wait_type;
-
- if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
- (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
- return I40IW_ERR_INVALID_HMCFN_ID;
-
- i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
- dev->hmc_fn_id);
- if (hmc_fn_id == dev->hmc_fn_id) {
- hmc_info = dev->hmc_info;
- query_fpm_mem.pa = dev->fpm_query_buf_pa;
- query_fpm_mem.va = dev->fpm_query_buf;
- } else {
- vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
- if (!vf_dev)
- return I40IW_ERR_INVALID_VF_ID;
-
- hmc_info = &vf_dev->hmc_info;
- iw_vf_idx = vf_dev->iw_vf_idx;
- i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
- hmc_info, hmc_info->hmc_obj);
- if (!vf_dev->fpm_query_buf) {
- if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
- ret_code = i40iw_alloc_query_fpm_buf(dev,
- &dev->vf_fpm_query_buf[iw_vf_idx]);
- if (ret_code)
- return ret_code;
- }
- vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
- vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
- }
- query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
- query_fpm_mem.va = vf_dev->fpm_query_buf;
-		/*
-		 * Hardware specific: this call is made by the PF on
-		 * behalf of a VF, and i40iw_sc_query_fpm_values must
-		 * poll the CCQ because the PF CCQ already exists.
-		 */
- poll_registers = false;
- }
-
- hmc_info->hmc_fn_id = hmc_fn_id;
-
- if (hmc_fn_id != dev->hmc_fn_id) {
- ret_code =
- i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
- } else {
- wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
- (u8)I40IW_CQP_WAIT_POLL_CQ;
-
- ret_code = i40iw_sc_query_fpm_values(
- dev->cqp,
- 0,
- hmc_info->hmc_fn_id,
- &query_fpm_mem,
- true,
- wait_type);
- }
- if (ret_code)
- return ret_code;
-
- /* parse the fpm_query_buf and fill hmc obj info */
- ret_code =
- i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
- hmc_info,
- &dev->hmc_fpm_misc);
- if (ret_code)
- return ret_code;
- i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
- query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
-
- if (hmc_fn_id != dev->hmc_fn_id) {
- i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
-
- /* parse the fpm_commit_buf and fill hmc obj info */
- i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
- mem_size = sizeof(struct i40iw_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
- ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
- if (ret_code)
- return ret_code;
- hmc_info->sd_table.sd_entry = virt_mem.va;
- }
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
- * populates fpm base address in hmc_info
- * @dev: ptr to i40iw_dev struct
- * @hmc_fn_id: hmc function id
- */
-static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
- u8 hmc_fn_id)
-{
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_hmc_obj_info *obj_info;
- u64 *buf;
- struct i40iw_dma_mem commit_fpm_mem;
- u32 i, j;
- enum i40iw_status_code ret_code = 0;
- bool poll_registers = true;
- u8 wait_type;
-
- if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
- (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
- return I40IW_ERR_INVALID_HMCFN_ID;
-
- if (hmc_fn_id == dev->hmc_fn_id) {
- hmc_info = dev->hmc_info;
- } else {
- hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
- poll_registers = false;
- }
- if (!hmc_info)
- return I40IW_ERR_BAD_PTR;
-
- obj_info = hmc_info->hmc_obj;
- buf = dev->fpm_commit_buf;
-
- /* copy cnt values in commit buf */
- for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
- i++, j += 8)
- set_64bit_val(buf, j, (u64)obj_info[i].cnt);
-
- set_64bit_val(buf, 40, 0); /* APBVT rsvd */
-
- commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
- commit_fpm_mem.va = dev->fpm_commit_buf;
- wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
- (u8)I40IW_CQP_WAIT_POLL_CQ;
- ret_code = i40iw_sc_commit_fpm_values(
- dev->cqp,
- 0,
- hmc_info->hmc_fn_id,
- &commit_fpm_mem,
- true,
- wait_type);
-
- /* parse the fpm_commit_buf and fill hmc obj info */
- if (!ret_code)
- ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
- hmc_info->hmc_obj,
- &hmc_info->sd_table.sd_cnt);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
- commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
-
- return ret_code;
-}
-
-/**
- * cqp_sds_wqe_fill - fill cqp wqe for sd
- * @cqp: struct for cqp hw
- * @info: sd info for wqe
- * @scratch: u64 saved to be used during cqp completion
- */
-static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
- struct i40iw_update_sds_info *info,
- u64 scratch)
-{
- u64 data;
- u64 header;
- u64 *wqe;
- int mem_entries, wqe_entries;
- struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
- u64 offset;
- u32 wqe_idx;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- I40IW_CQP_INIT_WQE(wqe);
- wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
- mem_entries = info->cnt - wqe_entries;
-
- header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
- LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
-
- if (mem_entries) {
- offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
- memcpy((char *)sdbuf->va + offset, &info->entry[3],
- mem_entries << 4);
- data = (u64)sdbuf->pa + offset;
- } else {
- data = 0;
- }
- data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
-
- set_64bit_val(wqe, 16, data);
-
- switch (wqe_entries) {
- case 3:
- set_64bit_val(wqe, 48,
- (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
- LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
-
- set_64bit_val(wqe, 56, info->entry[2].data);
- fallthrough;
- case 2:
- set_64bit_val(wqe, 32,
- (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
- LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
-
- set_64bit_val(wqe, 40, info->entry[1].data);
- fallthrough;
- case 1:
- set_64bit_val(wqe, 0,
- LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
-
- set_64bit_val(wqe, 8, info->entry[0].data);
- break;
- default:
- break;
- }
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- return 0;
-}
-
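To make the entry splitting in cqp_sds_wqe_fill concrete: up to three SD entries ride inline in the WQE, and the remainder are copied into the DMA side-buffer at 16 bytes apiece (the `mem_entries << 4`). A standalone sketch of just that arithmetic:

#include <stdio.h>

int main(void)
{
	int cnt = 5;				/* info->cnt */
	int wqe_entries = (cnt > 3) ? 3 : cnt;	/* inline in the WQE */
	int mem_entries = cnt - wqe_entries;	/* spilled to sdbuf */

	/* prints: inline=3 spilled=2 copy=32 bytes */
	printf("inline=%d spilled=%d copy=%d bytes\n",
	       wqe_entries, mem_entries, mem_entries << 4);
	return 0;
}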
-/**
- * i40iw_update_pe_sds - cqp wqe for sd
- * @dev: ptr to i40iw_dev struct
- * @info: sd info for the sd entries
- * @scratch: u64 saved to be used during cqp completion
- */
-static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
- struct i40iw_update_sds_info *info,
- u64 scratch)
-{
- struct i40iw_sc_cqp *cqp = dev->cqp;
- enum i40iw_status_code ret_code;
-
- ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
- if (!ret_code)
- i40iw_sc_cqp_post_sq(cqp);
-
- return ret_code;
-}
-
-/**
- * i40iw_update_sds_noccq - update sd before ccq created
- * @dev: sc device struct
- * @info: sd info for the sd entries
- */
-enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
- struct i40iw_update_sds_info *info)
-{
- u32 error, val, tail;
- struct i40iw_sc_cqp *cqp = dev->cqp;
- enum i40iw_status_code ret_code;
-
- ret_code = cqp_sds_wqe_fill(cqp, info, 0);
- if (ret_code)
- return ret_code;
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
- if (error)
- return I40IW_ERR_CQP_COMPL_ERROR;
-
- i40iw_sc_cqp_post_sq(cqp);
- ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
-
- return ret_code;
-}
-
-/**
- * i40iw_sc_suspend_qp - suspend qp for param change
- * @cqp: struct for cqp hw
- * @qp: sc qp struct
- * @scratch: u64 saved to be used during cqp completion
- */
-enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
- struct i40iw_sc_qp *qp,
- u64 scratch)
-{
- u64 header;
- u64 *wqe;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
- LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_resume_qp - resume qp after suspend
- * @cqp: struct for cqp hw
- * @qp: sc qp struct
- * @scratch: u64 saved to be used during cqp completion
- */
-enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
- struct i40iw_sc_qp *qp,
- u64 scratch)
-{
- u64 header;
- u64 *wqe;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe,
- 16,
- LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
-
- header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
- LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
-
-/**
- * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
- * @cqp: struct for cqp hw
- * @scratch: u64 saved to be used during cqp completion
- * @hmc_fn_id: hmc function id
- * @post_sq: flag for cqp db to ring
- * @poll_registers: flag to poll register for cqp completion
- */
-enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
- struct i40iw_sc_cqp *cqp,
- u64 scratch,
- u8 hmc_fn_id,
- bool post_sq,
- bool poll_registers)
-{
- u64 header;
- u64 *wqe;
- u32 tail, val, error;
- enum i40iw_status_code ret_code = 0;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
- set_64bit_val(wqe,
- 16,
- LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
-
- header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
- i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
- if (error) {
- ret_code = I40IW_ERR_CQP_COMPL_ERROR;
- return ret_code;
- }
- if (post_sq) {
- i40iw_sc_cqp_post_sq(cqp);
- if (poll_registers)
- /* check for cqp sq tail update */
- ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
- else
- ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
- I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
- NULL);
- }
-
- return ret_code;
-}
-
-/**
- * i40iw_ring_full - check if cqp ring is full
- * @cqp: struct for cqp hw
- */
-static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
-{
- return I40IW_RING_FULL_ERR(cqp->sq_ring);
-}
-
-/**
- * i40iw_est_sd - returns approximate number of SDs for HMC
- * @dev: sc device struct
- * @hmc_info: hmc structure, size and count for HMC objects
- */
-static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
-{
- int i;
- u64 size = 0;
- u64 sd;
-
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
- size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
-
- if (dev->is_pf)
- size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
-
- if (size & 0x1FFFFF)
- sd = (size >> 21) + 1; /* add 1 for remainder */
- else
- sd = size >> 21;
-
- if (!dev->is_pf) {
- /* 2MB alignment for VF PBLE HMC */
- size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
- if (size & 0x1FFFFF)
- sd += (size >> 21) + 1; /* add 1 for remainder */
- else
- sd += size >> 21;
- }
-
- return sd;
-}
-
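Each SD backs 2 MiB (1 << 21 bytes), so the rounding in i40iw_est_sd charges one extra SD for any partial 2 MiB remainder. A standalone sketch of the same rounding:

#include <stdio.h>

static unsigned long long sds_for(unsigned long long size)
{
	/* one SD per full 2 MiB, plus one for any remainder */
	return (size & 0x1FFFFF) ? (size >> 21) + 1 : size >> 21;
}

int main(void)
{
	printf("%llu\n", sds_for(5ULL << 20));	/* 5 MiB -> 3 SDs */
	printf("%llu\n", sds_for(4ULL << 20));	/* 4 MiB -> 2 SDs */
	return 0;
}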
-/**
- * i40iw_config_fpm_values - configure HMC objects
- * @dev: sc device struct
- * @qp_count: desired qp count
- */
-enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
-{
- struct i40iw_virt_mem virt_mem;
- u32 i, mem_size;
- u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
- u64 sd_needed;
- u32 loop_count = 0;
-
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
- enum i40iw_status_code ret_code = 0;
-
- hmc_info = dev->hmc_info;
- hmc_fpm_misc = &dev->hmc_fpm_misc;
-
- ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "i40iw_sc_init_iw_hmc returned error_code = %d\n",
- ret_code);
- return ret_code;
- }
-
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
- hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
- sd_needed = i40iw_est_sd(dev, hmc_info);
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
- __func__, sd_needed, hmc_info->first_sd_index);
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: sd count %d where max sd is %d\n",
- __func__, hmc_info->sd_table.sd_cnt,
- hmc_fpm_misc->max_sds);
-
- qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
- qpwantedoriginal = qpwanted;
- mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
- pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
-
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
- qp_count, hmc_fpm_misc->max_sds,
- hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
-
- do {
- ++loop_count;
- hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
- min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
- hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
- hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
- qpwanted * hmc_fpm_misc->ht_multiplier;
- hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
- hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
- hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
-
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
- roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
- roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
- hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
- hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
- ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
- hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
- hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
-
- /* How much memory is needed for all the objects. */
- sd_needed = i40iw_est_sd(dev, hmc_info);
- if ((loop_count > 1000) ||
- ((!(loop_count % 10)) &&
- (qpwanted > qpwantedoriginal * 2 / 3))) {
- if (qpwanted > FPM_MULTIPLIER)
- qpwanted = roundup_pow_of_two(qpwanted -
- FPM_MULTIPLIER);
- qpwanted >>= 1;
- }
- if (mrwanted > FPM_MULTIPLIER * 10)
- mrwanted -= FPM_MULTIPLIER * 10;
- if (pblewanted > FPM_MULTIPLIER * 1000)
- pblewanted -= FPM_MULTIPLIER * 1000;
- } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
-
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
- loop_count, sd_needed,
- hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
-
- ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "configure_iw_fpm returned error_code[x%08X]\n",
- i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
- return ret_code;
- }
-
- mem_size = sizeof(struct i40iw_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
- ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: failed to allocate memory for sd_entry buffer\n",
- __func__);
- return ret_code;
- }
- hmc_info->sd_table.sd_entry = virt_mem.va;
-
- return ret_code;
-}
-
-/**
- * i40iw_exec_cqp_cmd - execute cqp cmd when wqes are available
- * @dev: rdma device
- * @pcmdinfo: cqp command info
- */
-static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
- struct cqp_commands_info *pcmdinfo)
-{
- enum i40iw_status_code status;
- struct i40iw_dma_mem values_mem;
-
- dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
- switch (pcmdinfo->cqp_cmd) {
- case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
- status = i40iw_sc_del_local_mac_ipaddr_entry(
- pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
- pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
- pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
- pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
- pcmdinfo->post_sq);
- break;
- case OP_CEQ_DESTROY:
- status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
- pcmdinfo->in.u.ceq_destroy.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_AEQ_DESTROY:
- status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
- pcmdinfo->in.u.aeq_destroy.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_DELETE_ARP_CACHE_ENTRY:
- status = i40iw_sc_del_arp_cache_entry(
- pcmdinfo->in.u.del_arp_cache_entry.cqp,
- pcmdinfo->in.u.del_arp_cache_entry.scratch,
- pcmdinfo->in.u.del_arp_cache_entry.arp_index,
- pcmdinfo->post_sq);
- break;
- case OP_MANAGE_APBVT_ENTRY:
- status = i40iw_sc_manage_apbvt_entry(
- pcmdinfo->in.u.manage_apbvt_entry.cqp,
- &pcmdinfo->in.u.manage_apbvt_entry.info,
- pcmdinfo->in.u.manage_apbvt_entry.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_CEQ_CREATE:
- status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
- pcmdinfo->in.u.ceq_create.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_AEQ_CREATE:
- status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
- pcmdinfo->in.u.aeq_create.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
- status = i40iw_sc_alloc_local_mac_ipaddr_entry(
- pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
- pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
- status = i40iw_sc_add_local_mac_ipaddr_entry(
- pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
- &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
- pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_MANAGE_QHASH_TABLE_ENTRY:
- status = i40iw_sc_manage_qhash_table_entry(
- pcmdinfo->in.u.manage_qhash_table_entry.cqp,
- &pcmdinfo->in.u.manage_qhash_table_entry.info,
- pcmdinfo->in.u.manage_qhash_table_entry.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_QP_MODIFY:
- status = i40iw_sc_qp_modify(
- pcmdinfo->in.u.qp_modify.qp,
- &pcmdinfo->in.u.qp_modify.info,
- pcmdinfo->in.u.qp_modify.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_QP_UPLOAD_CONTEXT:
- status = i40iw_sc_qp_upload_context(
- pcmdinfo->in.u.qp_upload_context.dev,
- &pcmdinfo->in.u.qp_upload_context.info,
- pcmdinfo->in.u.qp_upload_context.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_CQ_CREATE:
- status = i40iw_sc_cq_create(
- pcmdinfo->in.u.cq_create.cq,
- pcmdinfo->in.u.cq_create.scratch,
- pcmdinfo->in.u.cq_create.check_overflow,
- pcmdinfo->post_sq);
- break;
- case OP_CQ_DESTROY:
- status = i40iw_sc_cq_destroy(
- pcmdinfo->in.u.cq_destroy.cq,
- pcmdinfo->in.u.cq_destroy.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_QP_CREATE:
- status = i40iw_sc_qp_create(
- pcmdinfo->in.u.qp_create.qp,
- &pcmdinfo->in.u.qp_create.info,
- pcmdinfo->in.u.qp_create.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_QP_DESTROY:
- status = i40iw_sc_qp_destroy(
- pcmdinfo->in.u.qp_destroy.qp,
- pcmdinfo->in.u.qp_destroy.scratch,
- pcmdinfo->in.u.qp_destroy.remove_hash_idx,
-				pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
- pcmdinfo->post_sq);
-
- break;
- case OP_ALLOC_STAG:
- status = i40iw_sc_alloc_stag(
- pcmdinfo->in.u.alloc_stag.dev,
- &pcmdinfo->in.u.alloc_stag.info,
- pcmdinfo->in.u.alloc_stag.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_MR_REG_NON_SHARED:
- status = i40iw_sc_mr_reg_non_shared(
- pcmdinfo->in.u.mr_reg_non_shared.dev,
- &pcmdinfo->in.u.mr_reg_non_shared.info,
- pcmdinfo->in.u.mr_reg_non_shared.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_DEALLOC_STAG:
- status = i40iw_sc_dealloc_stag(
- pcmdinfo->in.u.dealloc_stag.dev,
- &pcmdinfo->in.u.dealloc_stag.info,
- pcmdinfo->in.u.dealloc_stag.scratch,
- pcmdinfo->post_sq);
-
- break;
- case OP_MW_ALLOC:
- status = i40iw_sc_mw_alloc(
- pcmdinfo->in.u.mw_alloc.dev,
- pcmdinfo->in.u.mw_alloc.scratch,
- pcmdinfo->in.u.mw_alloc.mw_stag_index,
- pcmdinfo->in.u.mw_alloc.pd_id,
- pcmdinfo->post_sq);
-
- break;
- case OP_QP_FLUSH_WQES:
- status = i40iw_sc_qp_flush_wqes(
- pcmdinfo->in.u.qp_flush_wqes.qp,
- &pcmdinfo->in.u.qp_flush_wqes.info,
-				pcmdinfo->in.u.qp_flush_wqes.scratch,
-				pcmdinfo->post_sq);
- break;
- case OP_GEN_AE:
- status = i40iw_sc_gen_ae(
- pcmdinfo->in.u.gen_ae.qp,
- &pcmdinfo->in.u.gen_ae.info,
- pcmdinfo->in.u.gen_ae.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_ADD_ARP_CACHE_ENTRY:
- status = i40iw_sc_add_arp_cache_entry(
- pcmdinfo->in.u.add_arp_cache_entry.cqp,
- &pcmdinfo->in.u.add_arp_cache_entry.info,
- pcmdinfo->in.u.add_arp_cache_entry.scratch,
- pcmdinfo->post_sq);
- break;
- case OP_UPDATE_PE_SDS:
- /* case I40IW_CQP_OP_UPDATE_PE_SDS */
- status = i40iw_update_pe_sds(
- pcmdinfo->in.u.update_pe_sds.dev,
- &pcmdinfo->in.u.update_pe_sds.info,
-				pcmdinfo->in.u.update_pe_sds.scratch);
-
- break;
- case OP_MANAGE_HMC_PM_FUNC_TABLE:
- status = i40iw_sc_manage_hmc_pm_func_table(
- pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
- pcmdinfo->in.u.manage_hmc_pm.scratch,
- (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
- pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
- true);
- break;
- case OP_SUSPEND:
- status = i40iw_sc_suspend_qp(
- pcmdinfo->in.u.suspend_resume.cqp,
- pcmdinfo->in.u.suspend_resume.qp,
- pcmdinfo->in.u.suspend_resume.scratch);
- break;
- case OP_RESUME:
- status = i40iw_sc_resume_qp(
- pcmdinfo->in.u.suspend_resume.cqp,
- pcmdinfo->in.u.suspend_resume.qp,
- pcmdinfo->in.u.suspend_resume.scratch);
- break;
- case OP_MANAGE_VF_PBLE_BP:
- status = i40iw_manage_vf_pble_bp(
- pcmdinfo->in.u.manage_vf_pble_bp.cqp,
- &pcmdinfo->in.u.manage_vf_pble_bp.info,
- pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
- break;
- case OP_QUERY_FPM_VALUES:
- values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
- values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
- status = i40iw_sc_query_fpm_values(
- pcmdinfo->in.u.query_fpm_values.cqp,
- pcmdinfo->in.u.query_fpm_values.scratch,
- pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
- &values_mem, true, I40IW_CQP_WAIT_EVENT);
- break;
- case OP_COMMIT_FPM_VALUES:
- values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
- values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
- status = i40iw_sc_commit_fpm_values(
- pcmdinfo->in.u.commit_fpm_values.cqp,
- pcmdinfo->in.u.commit_fpm_values.scratch,
- pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
- &values_mem,
- true,
- I40IW_CQP_WAIT_EVENT);
- break;
- case OP_QUERY_RDMA_FEATURES:
- values_mem.pa = pcmdinfo->in.u.query_rdma_features.cap_pa;
- values_mem.va = pcmdinfo->in.u.query_rdma_features.cap_va;
- status = i40iw_sc_query_rdma_features(
- pcmdinfo->in.u.query_rdma_features.cqp, &values_mem,
- pcmdinfo->in.u.query_rdma_features.scratch);
- break;
- default:
- status = I40IW_NOT_SUPPORTED;
- break;
- }
-
- return status;
-}
-
-/**
- * i40iw_process_cqp_cmd - process all cqp commands
- * @dev: sc device struct
- * @pcmdinfo: cqp command info
- */
-enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
- struct cqp_commands_info *pcmdinfo)
-{
- enum i40iw_status_code status = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->cqp_lock, flags);
- if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
- status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
- else
- list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
- spin_unlock_irqrestore(&dev->cqp_lock, flags);
- return status;
-}
-
-/**
- * i40iw_process_bh - called from tasklet for cqp list
- * @dev: sc device struct
- */
-enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
-{
- enum i40iw_status_code status = 0;
- struct cqp_commands_info *pcmdinfo;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->cqp_lock, flags);
- while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
- pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
-
- status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
- if (status)
- break;
- }
- spin_unlock_irqrestore(&dev->cqp_lock, flags);
- return status;
-}
-
-/**
- * i40iw_iwarp_opcode - determine if incoming is rdma layer
- * @info: aeq info for the packet
- * @pkt: packet for error
- */
-static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
-{
- __be16 *mpa;
- u32 opcode = 0xffffffff;
-
- if (info->q2_data_written) {
- mpa = (__be16 *)pkt;
- opcode = ntohs(mpa[1]) & 0xf;
- }
- return opcode;
-}
-
-/**
- * i40iw_locate_mpa - return pointer to mpa in the pkt
- * @pkt: packet with data
- */
-static u8 *i40iw_locate_mpa(u8 *pkt)
-{
- /* skip over ethernet header */
- pkt += I40IW_MAC_HLEN;
-
- /* Skip over IP and TCP headers */
- pkt += 4 * (pkt[0] & 0x0f);
- pkt += 4 * ((pkt[12] >> 4) & 0x0f);
- return pkt;
-}
-
-/**
- * i40iw_setup_termhdr - termhdr for terminate pkt
- * @qp: sc qp ptr for pkt
- * @hdr: term hdr
- * @opcode: flush opcode for termhdr
- * @layer_etype: error layer + error type
- * @err: error cod ein the header
- */
-static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
- struct i40iw_terminate_hdr *hdr,
- enum i40iw_flush_opcode opcode,
- u8 layer_etype,
- u8 err)
-{
- qp->flush_code = opcode;
- hdr->layer_etype = layer_etype;
- hdr->error_code = err;
-}
-
-/**
- * i40iw_bld_terminate_hdr - build terminate message header
- * @qp: qp associated with received terminate AE
- * @info: the struct contiaing AE information
- */
-static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
- struct i40iw_aeqe_info *info)
-{
- u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
- u16 ddp_seg_len;
- int copy_len = 0;
- u8 is_tagged = 0;
- u32 opcode;
- struct i40iw_terminate_hdr *termhdr;
-
- termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
- memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
-
- if (info->q2_data_written) {
- /* Use data from offending packet to fill in ddp & rdma hdrs */
- pkt = i40iw_locate_mpa(pkt);
- ddp_seg_len = ntohs(*(__be16 *)pkt);
- if (ddp_seg_len) {
- copy_len = 2;
- termhdr->hdrct = DDP_LEN_FLAG;
- if (pkt[2] & 0x80) {
- is_tagged = 1;
- if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
- copy_len += TERM_DDP_LEN_TAGGED;
- termhdr->hdrct |= DDP_HDR_FLAG;
- }
- } else {
- if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
- copy_len += TERM_DDP_LEN_UNTAGGED;
- termhdr->hdrct |= DDP_HDR_FLAG;
- }
-
- if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
- if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
- copy_len += TERM_RDMA_LEN;
- termhdr->hdrct |= RDMA_HDR_FLAG;
- }
- }
- }
- }
- }
-
- opcode = i40iw_iwarp_opcode(info, pkt);
-
- switch (info->ae_id) {
- case I40IW_AE_AMP_UNALLOCATED_STAG:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
- i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
- (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
- else
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
- break;
- case I40IW_AE_AMP_BOUNDS_VIOLATION:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- if (info->q2_data_written)
- i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
- (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
- else
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
- break;
- case I40IW_AE_AMP_BAD_PD:
- switch (opcode) {
- case I40IW_OP_TYPE_RDMA_WRITE:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
- (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
- break;
- case I40IW_OP_TYPE_SEND_INV:
- case I40IW_OP_TYPE_SEND_SOL_INV:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
- break;
- default:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
- }
- break;
- case I40IW_AE_AMP_INVALID_STAG:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
- break;
- case I40IW_AE_AMP_BAD_QP:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
- break;
- case I40IW_AE_AMP_BAD_STAG_KEY:
- case I40IW_AE_AMP_BAD_STAG_INDEX:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- switch (opcode) {
- case I40IW_OP_TYPE_SEND_INV:
- case I40IW_OP_TYPE_SEND_SOL_INV:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
- break;
- default:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
- }
- break;
- case I40IW_AE_AMP_RIGHTS_VIOLATION:
- case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case I40IW_AE_PRIV_OPERATION_DENIED:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
- break;
- case I40IW_AE_AMP_TO_WRAP:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
- break;
- case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
- break;
- case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
- case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
- break;
- case I40IW_AE_LCE_QP_CATASTROPHIC:
- case I40IW_AE_DDP_NO_L_BIT:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
- (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
- break;
- case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
- break;
- case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
- break;
- case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
- if (is_tagged)
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
- else
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
- break;
- case I40IW_AE_DDP_UBE_INVALID_MO:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
- break;
- case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
- break;
- case I40IW_AE_DDP_UBE_INVALID_QN:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
- break;
- case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
- break;
- case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
- break;
- default:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
- (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
- break;
- }
-
- if (copy_len)
- memcpy(termhdr + 1, pkt, copy_len);
-
- return sizeof(struct i40iw_terminate_hdr) + copy_len;
-}
-
-/**
- * i40iw_terminate_send_fin() - Send fin for terminate message
- * @qp: qp associated with received terminate AE
- */
-void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
-{
- /* Send the fin only */
- i40iw_term_modify_qp(qp,
- I40IW_QP_STATE_TERMINATE,
- I40IWQP_TERM_SEND_FIN_ONLY,
- 0);
-}
-
-/**
- * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
- * @qp: qp associated with received terminate AE
- * @info: the struct contiaing AE information
- */
-void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
-{
- u8 termlen = 0;
-
- if (qp->term_flags & I40IW_TERM_SENT)
- return; /* Sanity check */
-
- /* Eventtype can change from bld_terminate_hdr */
- qp->eventtype = TERM_EVENT_QP_FATAL;
- termlen = i40iw_bld_terminate_hdr(qp, info);
- i40iw_terminate_start_timer(qp);
- qp->term_flags |= I40IW_TERM_SENT;
- i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
- I40IWQP_TERM_SEND_TERM_ONLY, termlen);
-}
-
-/**
- * i40iw_terminate_received - handle terminate received AE
- * @qp: qp associated with received terminate AE
- * @info: the struct contiaing AE information
- */
-void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
-{
- u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
- __be32 *mpa;
- u8 ddp_ctl;
- u8 rdma_ctl;
- u16 aeq_id = 0;
- struct i40iw_terminate_hdr *termhdr;
-
- mpa = (__be32 *)i40iw_locate_mpa(pkt);
- if (info->q2_data_written) {
- /* did not validate the frame - do it now */
- ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
- rdma_ctl = ntohl(mpa[0]) & 0xff;
- if ((ddp_ctl & 0xc0) != 0x40)
- aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
- else if ((ddp_ctl & 0x03) != 1)
- aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
- else if (ntohl(mpa[2]) != 2)
- aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
- else if (ntohl(mpa[3]) != 1)
- aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
- else if (ntohl(mpa[4]) != 0)
- aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
- else if ((rdma_ctl & 0xc0) != 0x40)
- aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
-
- info->ae_id = aeq_id;
- if (info->ae_id) {
- /* Bad terminate recvd - send back a terminate */
- i40iw_terminate_connection(qp, info);
- return;
- }
- }
-
- qp->term_flags |= I40IW_TERM_RCVD;
- qp->eventtype = TERM_EVENT_QP_FATAL;
- termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
- if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
- termhdr->layer_etype == RDMAP_REMOTE_OP) {
- i40iw_terminate_done(qp, 0);
- } else {
- i40iw_terminate_start_timer(qp);
- i40iw_terminate_send_fin(qp);
- }
-}
-
-/**
- * i40iw_sc_vsi_init - Initialize virtual device
- * @vsi: pointer to the vsi structure
- * @info: parameters to initialize vsi
- **/
-void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
-{
- int i;
-
- vsi->dev = info->dev;
- vsi->back_vsi = info->back_vsi;
- vsi->mtu = info->params->mtu;
- vsi->exception_lan_queue = info->exception_lan_queue;
- i40iw_fill_qos_list(info->params->qs_handle_list);
-
- for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
- vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
- i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
- vsi->qos[i].qs_handle);
- spin_lock_init(&vsi->qos[i].lock);
- INIT_LIST_HEAD(&vsi->qos[i].qplist);
- }
-}
-
-/**
- * i40iw_hw_stats_init - Initiliaze HW stats table
- * @stats: pestat struct
- * @fcn_idx: PCI fn id
- * @is_pf: Is it a PF?
- *
- * Populate the HW stats table with register offset addr for each
- * stats. And start the perioidic stats timer.
- */
-void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
-{
- u32 stats_reg_offset;
- u32 stats_index;
- struct i40iw_dev_hw_stats_offsets *stats_table =
- &stats->hw_stats_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
-
- if (is_pf) {
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
- I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
- I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
- I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
- I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
- I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
- I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
- I40E_GLPES_PFTCPRTXSEG(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
- I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
- I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
-
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
- I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
- I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
- I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
- I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
- I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
- I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
- I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
- I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
- I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
- I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
- I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
- I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
- I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
- I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
- I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
- I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
- I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
- I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
- I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
- I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
- I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
- I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
- I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
- I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
- I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
- I40E_GLPES_PFRDMAVINVLO(fcn_idx);
- } else {
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
- I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
- I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
- I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
- I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
- I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
- I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
- I40E_GLPES_VFTCPRTXSEG(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
- I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
- stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
- I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
-
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
- I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
- I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
- I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
- I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
- I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
- I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
- I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
- I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
- I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
- I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
- I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
- I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
- I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
- I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
- I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
- I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
- I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
- I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
- I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
- I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
- I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
- I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
- I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
- I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
- I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
- stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
- I40E_GLPES_VFRDMAVINVLO(fcn_idx);
- }
-
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
- stats_index++) {
- stats_reg_offset = stats_table->stats_offset_64[stats_index];
- last_rd_stats->stats_value_64[stats_index] =
- readq(stats->hw->hw_addr + stats_reg_offset);
- }
-
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
- stats_index++) {
- stats_reg_offset = stats_table->stats_offset_32[stats_index];
- last_rd_stats->stats_value_32[stats_index] =
- i40iw_rd32(stats->hw, stats_reg_offset);
- }
-}
-
-/**
- * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
- * @stats: pestat struct
- * @index: index in HW stats table which contains offset reg-addr
- * @value: hw stats value
- */
-void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
- enum i40iw_hw_stats_index_32b index,
- u64 *value)
-{
- struct i40iw_dev_hw_stats_offsets *stats_table =
- &stats->hw_stats_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
- u64 new_stats_value = 0;
- u32 stats_reg_offset = stats_table->stats_offset_32[index];
-
- new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
- /*roll-over case */
- if (new_stats_value < last_rd_stats->stats_value_32[index])
- hw_stats->stats_value_32[index] += new_stats_value;
- else
- hw_stats->stats_value_32[index] +=
- new_stats_value - last_rd_stats->stats_value_32[index];
- last_rd_stats->stats_value_32[index] = new_stats_value;
- *value = hw_stats->stats_value_32[index];
-}
-
-/**
- * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
- * @stats: pestat struct
- * @index: index in HW stats table which contains offset reg-addr
- * @value: hw stats value
- */
-void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
- enum i40iw_hw_stats_index_64b index,
- u64 *value)
-{
- struct i40iw_dev_hw_stats_offsets *stats_table =
- &stats->hw_stats_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
- u64 new_stats_value = 0;
- u32 stats_reg_offset = stats_table->stats_offset_64[index];
-
- new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
- /*roll-over case */
- if (new_stats_value < last_rd_stats->stats_value_64[index])
- hw_stats->stats_value_64[index] += new_stats_value;
- else
- hw_stats->stats_value_64[index] +=
- new_stats_value - last_rd_stats->stats_value_64[index];
- last_rd_stats->stats_value_64[index] = new_stats_value;
- *value = hw_stats->stats_value_64[index];
-}
-
-/**
- * i40iw_hw_stats_read_all - read all HW stat counters
- * @stats: pestat struct
- * @stats_values: hw stats structure
- *
- * Read all the HW stat counters and populates hw_stats structure
- * of passed-in vsi's pestat as well as copy created in stat_values.
- */
-void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
- struct i40iw_dev_hw_stats *stats_values)
-{
- u32 stats_index;
- unsigned long flags;
-
- spin_lock_irqsave(&stats->lock, flags);
-
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
- stats_index++)
- i40iw_hw_stats_read_32(stats, stats_index,
- &stats_values->stats_value_32[stats_index]);
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
- stats_index++)
- i40iw_hw_stats_read_64(stats, stats_index,
- &stats_values->stats_value_64[stats_index]);
- spin_unlock_irqrestore(&stats->lock, flags);
-}
-
-/**
- * i40iw_hw_stats_refresh_all - Update all HW stats structs
- * @stats: pestat struct
- *
- * Read all the HW stats counters to refresh values in hw_stats structure
- * of passed-in dev's pestat
- */
-void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
-{
- u64 stats_value;
- u32 stats_index;
- unsigned long flags;
-
- spin_lock_irqsave(&stats->lock, flags);
-
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
- stats_index++)
- i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
- for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
- stats_index++)
- i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
- spin_unlock_irqrestore(&stats->lock, flags);
-}
-
-/**
- * i40iw_get_fcn_id - Return the function id
- * @dev: pointer to the device
- */
-static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
-{
- u8 fcn_id = I40IW_INVALID_FCN_ID;
- u8 i;
-
- for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
- if (!dev->fcn_id_array[i]) {
- fcn_id = i;
- dev->fcn_id_array[i] = true;
- break;
- }
- return fcn_id;
-}
-
-/**
- * i40iw_vsi_stats_init - Initialize the vsi statistics
- * @vsi: pointer to the vsi structure
- * @info: The info structure used for initialization
- */
-enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
-{
- u8 fcn_id = info->fcn_id;
-
- if (info->alloc_fcn_id)
- fcn_id = i40iw_get_fcn_id(vsi->dev);
-
- if (fcn_id == I40IW_INVALID_FCN_ID)
- return I40IW_ERR_NOT_READY;
-
- vsi->pestat = info->pestat;
- vsi->pestat->hw = vsi->dev->hw;
- vsi->pestat->vsi = vsi;
-
- if (info->stats_initialize) {
- i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
- spin_lock_init(&vsi->pestat->lock);
- i40iw_hw_stats_start_timer(vsi);
- }
- vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
- vsi->fcn_id = fcn_id;
- return I40IW_SUCCESS;
-}
-
-/**
- * i40iw_vsi_stats_free - Free the vsi stats
- * @vsi: pointer to the vsi structure
- */
-void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
-{
- u8 fcn_id = vsi->fcn_id;
-
- if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
- vsi->dev->fcn_id_array[fcn_id] = false;
- i40iw_hw_stats_stop_timer(vsi);
-}
-
-static const struct i40iw_cqp_ops iw_cqp_ops = {
- .cqp_init = i40iw_sc_cqp_init,
- .cqp_create = i40iw_sc_cqp_create,
- .cqp_post_sq = i40iw_sc_cqp_post_sq,
- .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
- .cqp_destroy = i40iw_sc_cqp_destroy,
- .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
-};
-
-static const struct i40iw_ccq_ops iw_ccq_ops = {
- .ccq_init = i40iw_sc_ccq_init,
- .ccq_create = i40iw_sc_ccq_create,
- .ccq_destroy = i40iw_sc_ccq_destroy,
- .ccq_create_done = i40iw_sc_ccq_create_done,
- .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
- .ccq_arm = i40iw_sc_ccq_arm
-};
-
-static const struct i40iw_ceq_ops iw_ceq_ops = {
- .ceq_init = i40iw_sc_ceq_init,
- .ceq_create = i40iw_sc_ceq_create,
- .cceq_create_done = i40iw_sc_cceq_create_done,
- .cceq_destroy_done = i40iw_sc_cceq_destroy_done,
- .cceq_create = i40iw_sc_cceq_create,
- .ceq_destroy = i40iw_sc_ceq_destroy,
- .process_ceq = i40iw_sc_process_ceq
-};
-
-static const struct i40iw_aeq_ops iw_aeq_ops = {
- .aeq_init = i40iw_sc_aeq_init,
- .aeq_create = i40iw_sc_aeq_create,
- .aeq_destroy = i40iw_sc_aeq_destroy,
- .get_next_aeqe = i40iw_sc_get_next_aeqe,
- .repost_aeq_entries = i40iw_sc_repost_aeq_entries,
- .aeq_create_done = i40iw_sc_aeq_create_done,
- .aeq_destroy_done = i40iw_sc_aeq_destroy_done
-};
-
-/* iwarp pd ops */
-static const struct i40iw_pd_ops iw_pd_ops = {
- .pd_init = i40iw_sc_pd_init,
-};
-
-static const struct i40iw_priv_qp_ops iw_priv_qp_ops = {
- .qp_init = i40iw_sc_qp_init,
- .qp_create = i40iw_sc_qp_create,
- .qp_modify = i40iw_sc_qp_modify,
- .qp_destroy = i40iw_sc_qp_destroy,
- .qp_flush_wqes = i40iw_sc_qp_flush_wqes,
- .qp_upload_context = i40iw_sc_qp_upload_context,
- .qp_setctx = i40iw_sc_qp_setctx,
- .qp_send_lsmm = i40iw_sc_send_lsmm,
- .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
- .qp_send_rtt = i40iw_sc_send_rtt,
- .qp_post_wqe0 = i40iw_sc_post_wqe0,
- .iw_mr_fast_register = i40iw_sc_mr_fast_register
-};
-
-static const struct i40iw_priv_cq_ops iw_priv_cq_ops = {
- .cq_init = i40iw_sc_cq_init,
- .cq_create = i40iw_sc_cq_create,
- .cq_destroy = i40iw_sc_cq_destroy,
- .cq_modify = i40iw_sc_cq_modify,
-};
-
-static const struct i40iw_mr_ops iw_mr_ops = {
- .alloc_stag = i40iw_sc_alloc_stag,
- .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
- .mr_reg_shared = i40iw_sc_mr_reg_shared,
- .dealloc_stag = i40iw_sc_dealloc_stag,
- .query_stag = i40iw_sc_query_stag,
- .mw_alloc = i40iw_sc_mw_alloc
-};
-
-static const struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
- .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
- .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
- .commit_fpm_values = i40iw_sc_commit_fpm_values,
- .query_fpm_values = i40iw_sc_query_fpm_values,
- .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
- .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
- .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
- .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
- .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
- .manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
- .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
- .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
- .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
- .cqp_nop = i40iw_sc_cqp_nop,
- .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
- .query_fpm_values_done = i40iw_sc_query_fpm_values_done,
- .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
- .update_suspend_qp = i40iw_sc_suspend_qp,
- .update_resume_qp = i40iw_sc_resume_qp
-};
-
-static const struct i40iw_hmc_ops iw_hmc_ops = {
- .init_iw_hmc = i40iw_sc_init_iw_hmc,
- .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
- .configure_iw_fpm = i40iw_sc_configure_iw_fpm,
- .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
- .create_hmc_object = i40iw_sc_create_hmc_obj,
- .del_hmc_object = i40iw_sc_del_hmc_obj
-};
-
-/**
- * i40iw_device_init - Initialize IWARP device
- * @dev: IWARP device pointer
- * @info: IWARP init info
- */
-enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
- struct i40iw_device_init_info *info)
-{
- u32 val;
- u32 vchnl_ver = 0;
- u16 hmc_fcn = 0;
- enum i40iw_status_code ret_code = 0;
- u8 db_size;
-
- spin_lock_init(&dev->cqp_lock);
-
- i40iw_device_init_uk(&dev->dev_uk);
-
- dev->debug_mask = info->debug_mask;
-
- dev->hmc_fn_id = info->hmc_fn_id;
- dev->is_pf = info->is_pf;
-
- dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
- dev->fpm_query_buf = info->fpm_query_buf;
-
- dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
- dev->fpm_commit_buf = info->fpm_commit_buf;
-
- dev->hw = info->hw;
- dev->hw->hw_addr = info->bar0;
-
- if (dev->is_pf) {
- val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
- dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
-
- val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
- db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
- if ((db_size != I40IW_PE_DB_SIZE_4M) &&
- (db_size != I40IW_PE_DB_SIZE_8M)) {
- i40iw_debug(dev, I40IW_DEBUG_DEV,
- "%s: PE doorbell is not enabled in CSR val 0x%x\n",
- __func__, val);
- ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
- return ret_code;
- }
- dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
- dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
- } else {
- dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
- }
-
- dev->cqp_ops = &iw_cqp_ops;
- dev->ccq_ops = &iw_ccq_ops;
- dev->ceq_ops = &iw_ceq_ops;
- dev->aeq_ops = &iw_aeq_ops;
- dev->cqp_misc_ops = &iw_cqp_misc_ops;
- dev->iw_pd_ops = &iw_pd_ops;
- dev->iw_priv_qp_ops = &iw_priv_qp_ops;
- dev->iw_priv_cq_ops = &iw_priv_cq_ops;
- dev->mr_ops = &iw_mr_ops;
- dev->hmc_ops = &iw_hmc_ops;
- dev->vchnl_if.vchnl_send = info->vchnl_send;
- if (dev->vchnl_if.vchnl_send)
- dev->vchnl_up = true;
- else
- dev->vchnl_up = false;
- if (!dev->is_pf) {
- dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
- ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
- if (!ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_DEV,
- "%s: Get Channel version rc = 0x%0x, version is %u\n",
- __func__, ret_code, vchnl_ver);
- ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
- if (!ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_DEV,
- "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
- __func__, ret_code, hmc_fcn);
- dev->hmc_fn_id = (u8)hmc_fcn;
- }
- }
- }
- dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
-
- return ret_code;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
deleted file mode 100644
index 86d5a33c57cc..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ /dev/null
@@ -1,1746 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_D_H
-#define I40IW_D_H
-
-#define I40IW_FIRST_USER_QP_ID 2
-
-#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
-#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
-
-#define I40IW_PE_DB_SIZE_4M 1
-#define I40IW_PE_DB_SIZE_8M 2
-
-#define I40IW_DDP_VER 1
-#define I40IW_RDMAP_VER 1
-
-#define I40IW_RDMA_MODE_RDMAC 0
-#define I40IW_RDMA_MODE_IETF 1
-
-#define I40IW_QP_STATE_INVALID 0
-#define I40IW_QP_STATE_IDLE 1
-#define I40IW_QP_STATE_RTS 2
-#define I40IW_QP_STATE_CLOSING 3
-#define I40IW_QP_STATE_RESERVED 4
-#define I40IW_QP_STATE_TERMINATE 5
-#define I40IW_QP_STATE_ERROR 6
-
-#define I40IW_STAG_STATE_INVALID 0
-#define I40IW_STAG_STATE_VALID 1
-
-#define I40IW_STAG_TYPE_SHARED 0
-#define I40IW_STAG_TYPE_NONSHARED 1
-
-#define I40IW_MAX_USER_PRIORITY 8
-#define I40IW_MAX_STATS_COUNT 16
-#define I40IW_FIRST_NON_PF_STAT 4
-
-
-#define I40IW_MTU_TO_MSS_IPV4 40
-#define I40IW_MTU_TO_MSS_IPV6 60
-#define I40IW_DEFAULT_MTU 1500
-
-#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
-#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
-#define LS_32_1(val, bits) (u32)(val << bits)
-#define RS_32_1(val, bits) (u32)(val >> bits)
-#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
-
-#define QS_HANDLE_UNKNOWN 0xffff
-
-#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
-
-#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
-#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK))
-#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT)
-
-#define TERM_DDP_LEN_TAGGED 14
-#define TERM_DDP_LEN_UNTAGGED 18
-#define TERM_RDMA_LEN 28
-#define RDMA_OPCODE_MASK 0x0f
-#define RDMA_READ_REQ_OPCODE 1
-#define Q2_BAD_FRAME_OFFSET 72
-#define Q2_FPSN_OFFSET 64
-#define CQE_MAJOR_DRV 0x8000
-
-#define I40IW_TERM_SENT 0x01
-#define I40IW_TERM_RCVD 0x02
-#define I40IW_TERM_DONE 0x04
-#define I40IW_MAC_HLEN 14
-
-#define I40IW_INVALID_WQE_INDEX 0xffffffff
-
-#define I40IW_CQP_WAIT_POLL_REGS 1
-#define I40IW_CQP_WAIT_POLL_CQ 2
-#define I40IW_CQP_WAIT_EVENT 3
-
-#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
-
-#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \
- ( \
- &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
- )
-#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \
- ( \
- &(((struct i40iw_extended_cqe *) \
- ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
- )
-
-#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \
- ( \
- &_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)] \
- )
-
-#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \
- ( \
- &_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \
- )
-
-#define I40IW_AE_SOURCE_RSVD 0x0
-#define I40IW_AE_SOURCE_RQ 0x1
-#define I40IW_AE_SOURCE_RQ_0011 0x3
-
-#define I40IW_AE_SOURCE_CQ 0x2
-#define I40IW_AE_SOURCE_CQ_0110 0x6
-#define I40IW_AE_SOURCE_CQ_1010 0xA
-#define I40IW_AE_SOURCE_CQ_1110 0xE
-
-#define I40IW_AE_SOURCE_SQ 0x5
-#define I40IW_AE_SOURCE_SQ_0111 0x7
-
-#define I40IW_AE_SOURCE_IN_RR_WR 0x9
-#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB
-#define I40IW_AE_SOURCE_OUT_RR 0xD
-#define I40IW_AE_SOURCE_OUT_RR_1111 0xF
-
-#define I40IW_TCP_STATE_NON_EXISTENT 0
-#define I40IW_TCP_STATE_CLOSED 1
-#define I40IW_TCP_STATE_LISTEN 2
-#define I40IW_STATE_SYN_SEND 3
-#define I40IW_TCP_STATE_SYN_RECEIVED 4
-#define I40IW_TCP_STATE_ESTABLISHED 5
-#define I40IW_TCP_STATE_CLOSE_WAIT 6
-#define I40IW_TCP_STATE_FIN_WAIT_1 7
-#define I40IW_TCP_STATE_CLOSING 8
-#define I40IW_TCP_STATE_LAST_ACK 9
-#define I40IW_TCP_STATE_FIN_WAIT_2 10
-#define I40IW_TCP_STATE_TIME_WAIT 11
-#define I40IW_TCP_STATE_RESERVED_1 12
-#define I40IW_TCP_STATE_RESERVED_2 13
-#define I40IW_TCP_STATE_RESERVED_3 14
-#define I40IW_TCP_STATE_RESERVED_4 15
-
-/* ILQ CQP hash table fields */
-#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32
-#define I40IW_CQPSQ_QHASH_VLANID_MASK \
- ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32
-#define I40IW_CQPSQ_QHASH_QPN_MASK \
- ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0
-#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16
-#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \
- ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0
-#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \
- ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32
-#define I40IW_CQPSQ_QHASH_ADDR0_MASK \
- ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0
-#define I40IW_CQPSQ_QHASH_ADDR1_MASK \
- ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32
-#define I40IW_CQPSQ_QHASH_ADDR2_MASK \
- ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0
-#define I40IW_CQPSQ_QHASH_ADDR3_MASK \
- ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63
-#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \
- ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT)
-#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32
-#define I40IW_CQPSQ_QHASH_OPCODE_MASK \
- ((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61
-#define I40IW_CQPSQ_QHASH_MANAGE_MASK \
- ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60
-#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \
- ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59
-#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \
- ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT)
-
-#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42
-#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \
- ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT)
-/* CQP Host Context */
-#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0
-#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT)
-
-#define I40IW_CQPHC_SQSIZE_SHIFT 8
-#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT)
-
-#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1
-#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT)
-
-#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32
-#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT)
-
-#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0
-#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT)
-
-#define I40IW_CQPHC_SVER_SHIFT 24
-#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT)
-
-#define I40IW_CQPHC_SQBASE_SHIFT 9
-#define I40IW_CQPHC_SQBASE_MASK \
- (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT)
-
-#define I40IW_CQPHC_QPCTX_SHIFT 0
-#define I40IW_CQPHC_QPCTX_MASK \
- (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT)
-#define I40IW_CQPHC_SVER 1
-
-#define I40IW_CQP_SW_SQSIZE_4 4
-#define I40IW_CQP_SW_SQSIZE_2048 2048
-
-/* iWARP QP Doorbell shadow area */
-#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0
-#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \
- (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT)
-
-/* Completion Queue Doorbell shadow area */
-#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0
-#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT)
-
-#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0
-#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \
- (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT)
-
-#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14
-#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT)
-
-#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15
-#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT)
-
-#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16
-#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \
- (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT)
-
-/* CQP and iWARP Completion Queue */
-#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CCQ_OPRETVAL_SHIFT 0
-#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT)
-
-#define I40IW_CQ_MINERR_SHIFT 0
-#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT)
-
-#define I40IW_CQ_MAJERR_SHIFT 16
-#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT)
-
-#define I40IW_CQ_WQEIDX_SHIFT 32
-#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT)
-
-#define I40IW_CQ_ERROR_SHIFT 55
-#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT)
-
-#define I40IW_CQ_SQ_SHIFT 62
-#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT)
-
-#define I40IW_CQ_VALID_SHIFT 63
-#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT)
-
-#define I40IWCQ_PAYLDLEN_SHIFT 0
-#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT)
-
-#define I40IWCQ_TCPSEQNUM_SHIFT 32
-#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT)
-
-#define I40IWCQ_INVSTAG_SHIFT 0
-#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT)
-
-#define I40IWCQ_QPID_SHIFT 32
-#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT)
-
-#define I40IWCQ_PSHDROP_SHIFT 51
-#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT)
-
-#define I40IWCQ_SRQ_SHIFT 52
-#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT)
-
-#define I40IWCQ_STAG_SHIFT 53
-#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT)
-
-#define I40IWCQ_SOEVENT_SHIFT 54
-#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT)
-
-#define I40IWCQ_OP_SHIFT 56
-#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT)
-
-/* CEQE format */
-#define I40IW_CEQE_CQCTX_SHIFT 0
-#define I40IW_CEQE_CQCTX_MASK \
- (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT)
-
-#define I40IW_CEQE_VALID_SHIFT 63
-#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT)
-
-/* AEQE format */
-#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_AEQE_QPCQID_SHIFT 0
-#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT)
-
-#define I40IW_AEQE_WQDESCIDX_SHIFT 18
-#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT)
-
-#define I40IW_AEQE_OVERFLOW_SHIFT 33
-#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT)
-
-#define I40IW_AEQE_AECODE_SHIFT 34
-#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT)
-
-#define I40IW_AEQE_AESRC_SHIFT 50
-#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT)
-
-#define I40IW_AEQE_IWSTATE_SHIFT 54
-#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT)
-
-#define I40IW_AEQE_TCPSTATE_SHIFT 57
-#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT)
-
-#define I40IW_AEQE_Q2DATA_SHIFT 61
-#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT)
-
-#define I40IW_AEQE_VALID_SHIFT 63
-#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT)
-
-/* CQP SQ WQES */
-#define I40IW_QP_TYPE_IWARP 1
-#define I40IW_QP_TYPE_UDA 2
-#define I40IW_QP_TYPE_CQP 4
-
-#define I40IW_CQ_TYPE_IWARP 1
-#define I40IW_CQ_TYPE_ILQ 2
-#define I40IW_CQ_TYPE_IEQ 3
-#define I40IW_CQ_TYPE_CQP 4
-
-#define I40IWQP_TERM_SEND_TERM_AND_FIN 0
-#define I40IWQP_TERM_SEND_TERM_ONLY 1
-#define I40IWQP_TERM_SEND_FIN_ONLY 2
-#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3
-
-#define I40IW_CQP_OP_CREATE_QP 0
-#define I40IW_CQP_OP_MODIFY_QP 0x1
-#define I40IW_CQP_OP_DESTROY_QP 0x02
-#define I40IW_CQP_OP_CREATE_CQ 0x03
-#define I40IW_CQP_OP_MODIFY_CQ 0x04
-#define I40IW_CQP_OP_DESTROY_CQ 0x05
-#define I40IW_CQP_OP_CREATE_SRQ 0x06
-#define I40IW_CQP_OP_MODIFY_SRQ 0x07
-#define I40IW_CQP_OP_DESTROY_SRQ 0x08
-#define I40IW_CQP_OP_ALLOC_STAG 0x09
-#define I40IW_CQP_OP_REG_MR 0x0a
-#define I40IW_CQP_OP_QUERY_STAG 0x0b
-#define I40IW_CQP_OP_REG_SMR 0x0c
-#define I40IW_CQP_OP_DEALLOC_STAG 0x0d
-#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e
-#define I40IW_CQP_OP_MANAGE_ARP 0x0f
-#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
-#define I40IW_CQP_OP_QUERY_RDMA_FEATURES 0x12
-#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
-#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
-#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
-#define I40IW_CQP_OP_CREATE_CEQ 0x16
-#define I40IW_CQP_OP_DESTROY_CEQ 0x18
-#define I40IW_CQP_OP_CREATE_AEQ 0x19
-#define I40IW_CQP_OP_DESTROY_AEQ 0x1b
-#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c
-#define I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d
-#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e
-#define I40IW_CQP_OP_UPDATE_PE_SDS 0x1f
-#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20
-#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21
-#define I40IW_CQP_OP_FLUSH_WQES 0x22
-/* I40IW_CQP_OP_GEN_AE is the same value as I40IW_CQP_OP_FLUSH_WQES */
-#define I40IW_CQP_OP_GEN_AE 0x22
-#define I40IW_CQP_OP_MANAGE_APBVT 0x23
-#define I40IW_CQP_OP_NOP 0x24
-#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
-#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26
-#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27
-#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28
-#define I40IW_CQP_OP_SUSPEND_QP 0x29
-#define I40IW_CQP_OP_RESUME_QP 0x2a
-#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
-#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d
-
-#define I40IW_FEATURE_BUF_SIZE (8 * I40IW_MAX_FEATURES)
-
-#define I40IW_FW_VER_MINOR_SHIFT 0
-#define I40IW_FW_VER_MINOR_MASK \
- (0xffffULL << I40IW_FW_VER_MINOR_SHIFT)
-
-#define I40IW_FW_VER_MAJOR_SHIFT 16
-#define I40IW_FW_VER_MAJOR_MASK \
- (0xffffULL << I40IW_FW_VER_MAJOR_SHIFT)
-
-#define I40IW_FEATURE_INFO_SHIFT 0
-#define I40IW_FEATURE_INFO_MASK \
- (0xffffULL << I40IW_FEATURE_INFO_SHIFT)
-
-#define I40IW_FEATURE_CNT_SHIFT 32
-#define I40IW_FEATURE_CNT_MASK \
- (0xffffULL << I40IW_FEATURE_CNT_SHIFT)
-
-#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
-#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
-
-#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32
-#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT)
-
-#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56
-#define I40IW_UDA_QPSQ_MACLEN_MASK \
- ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT)
-
-#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48
-#define I40IW_UDA_QPSQ_IPLEN_MASK \
- ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT)
-
-#define I40IW_UDA_QPSQ_L4T_SHIFT 30
-#define I40IW_UDA_QPSQ_L4T_MASK \
- ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT)
-
-#define I40IW_UDA_QPSQ_IIPT_SHIFT 28
-#define I40IW_UDA_QPSQ_IIPT_MASK \
- ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT)
-
-#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24
-#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT)
-
-#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0
-#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT)
-
-#define I40IW_UDA_QPSQ_VALID_SHIFT 63
-#define I40IW_UDA_QPSQ_VALID_MASK \
- ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT)
-
-#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62
-#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT)
-
-#define I40IW_UDA_PAYLOADLEN_SHIFT 0
-#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT)
-
-#define I40IW_UDA_HDRLEN_SHIFT 16
-#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT)
-
-#define I40IW_VLAN_TAG_VALID_SHIFT 50
-#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT)
-
-#define I40IW_UDA_L3PROTO_SHIFT 0
-#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT)
-
-#define I40IW_UDA_L4PROTO_SHIFT 16
-#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT)
-
-#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44
-#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \
- ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT)
-
-/* CQP SQ WQE common fields */
-#define I40IW_CQPSQ_OPCODE_SHIFT 32
-#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)
-
-#define I40IW_CQPSQ_WQEVALID_SHIFT 63
-#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)
-
-#define I40IW_CQPSQ_TPHVAL_SHIFT 0
-#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT)
-
-#define I40IW_CQPSQ_TPHEN_SHIFT 60
-#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT)
-
-#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK
-
-/* Create/Modify/Destroy QP */
-
-#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32
-#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT)
-
-#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48
-#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT)
-
-#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_QP_QPID_SHIFT 0
-#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL)
-/* I40IWCQ_QPID_MASK */
-
-#define I40IW_CQPSQ_QP_OP_SHIFT 32
-#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK
-
-#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42
-#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT)
-
-#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43
-#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \
- (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT)
-
-#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44
-#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \
- (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT)
-
-#define I40IW_CQPSQ_QP_VQ_SHIFT 45
-#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT)
-
-#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46
-#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \
- (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT)
-
-#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47
-#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \
- (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT)
-
-#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48
-#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT)
-
-#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
-#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
-
-#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
-#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
- (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
-
-#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55
-#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \
- (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT)
-
-#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56
-#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT)
-
-#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58
-#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT)
-
-#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59
-#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \
- (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT)
-
-#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60
-#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \
- (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT)
-
-#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK
-
-/* Create/Modify/Destroy CQ */
-#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0
-#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
-#define I40IW_CQPSQ_CQ_CQCTX_MASK \
- (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
-#define I40IW_CQPSQ_CQ_CQCTX_MASK \
- (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
-
-#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0
-#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \
- (0x3ffff << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24
-#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT)
-
-#define I40IW_CQPSQ_CQ_OP_SHIFT 32
-#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43
-#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT)
-
-#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44
-#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46
-#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \
- (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT)
-
-#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47
-#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT)
-
-#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48
-#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \
- (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT)
-
-#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49
-#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \
- (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT)
-
-#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61
-#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \
- (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT)
-
-/* Create/Modify/Destroy Shared Receive Queue */
-
-#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0
-#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4
-#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \
- (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32
-#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \
- (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16
-#define I40IW_CQPSQ_SRQ_PDID_MASK \
- (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0
-#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
-#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
-
-#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT
-#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK
-
-#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT
-#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK
-
-#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61
-#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \
- (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6
-#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \
- (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT)
-
-#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0
-#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \
- (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT)
-
-/* Allocate/Register/Register Shared/Deallocate Stag */
-#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0
-#define I40IW_CQPSQ_STAG_STAGLEN_MASK \
- (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT)
-
-#define I40IW_CQPSQ_STAG_PDID_SHIFT 48
-#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT)
-
-#define I40IW_CQPSQ_STAG_KEY_SHIFT 0
-#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT)
-
-#define I40IW_CQPSQ_STAG_IDX_SHIFT 8
-#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT)
-
-#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32
-#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \
- (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT)
-
-#define I40IW_CQPSQ_STAG_MR_SHIFT 43
-#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT)
-
-#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
-#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
-
-#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46
-#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \
- (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT)
-
-#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48
-#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \
- (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT)
-
-#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53
-#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK \
- (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT)
-
-#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59
-#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \
- (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT)
-
-#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60
-#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \
- (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT)
-
-#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61
-#define I40IW_CQPSQ_STAG_USEPFRID_MASK \
- (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT)
-
-#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0
-#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \
- (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT)
-
-#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0
-#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \
- (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT)
-
-/* Query stag */
-#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT
-#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK
-
-/* Allocate Local IP Address Entry */
-
-/* Manage Local IP Address Table - MLIPA */
-#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0
-#define I40IW_CQPSQ_MLIPA_IPV4_MASK \
- (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0
-#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \
- (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42
-#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \
- (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43
-#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \
- (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62
-#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \
- (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61
-#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \
- (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0
-#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8
-#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16
-#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24
-#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32
-#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT)
-
-#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40
-#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT)
-
-/* Manage ARP Table - MAT */
-#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0
-#define I40IW_CQPSQ_MAT_REACHMAX_MASK \
- (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT)
-
-#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0
-#define I40IW_CQPSQ_MAT_MACADDR_MASK \
- (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT)
-
-#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0
-#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \
- (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT)
-
-#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42
-#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \
- (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT)
-
-#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43
-#define I40IW_CQPSQ_MAT_PERMANENT_MASK \
- (1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT)
-
-#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44
-#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT)
-
-/* Manage VF PBLE Backing Pages - MVPBP */
-#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0
-#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \
- (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT)
-
-#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16
-#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \
- (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT)
-
-#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32
-#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \
- (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT)
-
-#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62
-#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \
- (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT)
-
-#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3
-#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \
- (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)
-
-#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff
-
-#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0
-#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \
- I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT)
-
-#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0
-#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT)
-
-#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62
-#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT)
-
-/* Upload Context - UCTX */
-#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0
-#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT)
-
-#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48
-#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT)
-
-#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61
-#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \
- (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT)
-
-#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62
-#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \
- (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT)
-
-/* Manage HMC PM Function Table - MHMC */
-#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0
-#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT)
-
-#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62
-#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \
- (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT)
-
-/* Set HMC Resource Profile - SHMCRP */
-#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0
-#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \
- (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT)
-#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32
-#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT)
-
-/* Create/Destroy CEQ */
-#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0
-#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \
- (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT)
-
-#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0
-#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT)
-
-#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
-#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
-
-#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47
-#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT)
-
-#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0
-#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \
- (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT)
-
-/* Create/Destroy AEQ */
-#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0
-#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \
- (0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT)
-
-#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
-#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
-
-#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47
-#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT)
-
-#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0
-#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \
- (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT)
-
-/* Commit FPM Values - CFPM */
-#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0
-#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT)
-
-/* Flush WQEs - FWQE */
-#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0
-#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16
-#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \
- (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0
-#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \
- (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16
-#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \
- (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32
-#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \
- (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48
-#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \
- (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0
-#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59
-#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \
- I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60
-#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \
- (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61
-#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT)
-
-#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62
-#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT)
-
-/* Manage Accelerated Port Table - MAPT */
-#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0
-#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT)
-
-#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62
-#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT)
-
-/* Update Protocol Engine SDs */
-#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0
-#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT)
-
-#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0
-#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \
- (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT)
-
-#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32
-#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \
- (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT)
-#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0
-#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \
- (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT)
-
-#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63
-#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \
- ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT)
-
-#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0
-#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \
- (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT)
-
-#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7
-#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \
- (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT)
-
-/* Suspend QP */
-#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0
-#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3ffffUL)	/* I40IWCQ_QPID_MASK */
-
-/* Resume QP */
-#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0
-#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \
- (0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT)
-
-#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0
-#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3ffffUL)	/* I40IWCQ_QPID_MASK */
-
-/* IW QP Context */
-#define I40IWQPC_DDP_VER_SHIFT 0
-#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT)
-
-#define I40IWQPC_SNAP_SHIFT 2
-#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT)
-
-#define I40IWQPC_IPV4_SHIFT 3
-#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT)
-
-#define I40IWQPC_NONAGLE_SHIFT 4
-#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT)
-
-#define I40IWQPC_INSERTVLANTAG_SHIFT 5
-#define I40IWQPC_INSERTVLANTAG_MASK (1UL << I40IWQPC_INSERTVLANTAG_SHIFT)
-
-#define I40IWQPC_USESRQ_SHIFT 6
-#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT)
-
-#define I40IWQPC_TIMESTAMP_SHIFT 7
-#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT)
-
-#define I40IWQPC_RQWQESIZE_SHIFT 8
-#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT)
-
-#define I40IWQPC_INSERTL2TAG2_SHIFT 11
-#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT)
-
-#define I40IWQPC_LIMIT_SHIFT 12
-#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT)
-
-#define I40IWQPC_DROPOOOSEG_SHIFT 15
-#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT)
-
-#define I40IWQPC_DUPACK_THRESH_SHIFT 16
-#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT)
-
-#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19
-#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT)
-
-#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19
-#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT)
-
-#define I40IWQPC_RCVTPHEN_SHIFT 28
-#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT)
-
-#define I40IWQPC_XMITTPHEN_SHIFT 29
-#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT)
-
-#define I40IWQPC_RQTPHEN_SHIFT 30
-#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT)
-
-#define I40IWQPC_SQTPHEN_SHIFT 31
-#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT)
-
-#define I40IWQPC_PPIDX_SHIFT 32
-#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT)
-
-#define I40IWQPC_PMENA_SHIFT 47
-#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT)
-
-#define I40IWQPC_RDMAP_VER_SHIFT 62
-#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT)
-
-#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPC_TTL_SHIFT 0
-#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT)
-
-#define I40IWQPC_RQSIZE_SHIFT 8
-#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT)
-
-#define I40IWQPC_SQSIZE_SHIFT 12
-#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT)
-
-#define I40IWQPC_SRCMACADDRIDX_SHIFT 16
-#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT)
-
-#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23
-#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT)
-
-#define I40IWQPC_TOS_SHIFT 24
-#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT)
-
-#define I40IWQPC_SRCPORTNUM_SHIFT 32
-#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT)
-
-#define I40IWQPC_DESTPORTNUM_SHIFT 48
-#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT)
-
-#define I40IWQPC_DESTIPADDR0_SHIFT 32
-#define I40IWQPC_DESTIPADDR0_MASK \
- (0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT)
-
-#define I40IWQPC_DESTIPADDR1_SHIFT 0
-#define I40IWQPC_DESTIPADDR1_MASK \
- (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT)
-
-#define I40IWQPC_DESTIPADDR2_SHIFT 32
-#define I40IWQPC_DESTIPADDR2_MASK \
- (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT)
-
-#define I40IWQPC_DESTIPADDR3_SHIFT 0
-#define I40IWQPC_DESTIPADDR3_MASK \
- (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT)
-
-#define I40IWQPC_SNDMSS_SHIFT 16
-#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
-
-#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16
-#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT)
-
-#define I40IWQPC_VLANTAG_SHIFT 32
-#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
-
-#define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
-
-#define I40IWQPC_FLOWLABEL_SHIFT 0
-#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
-
-#define I40IWQPC_WSCALE_SHIFT 20
-#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT)
-
-#define I40IWQPC_KEEPALIVE_SHIFT 21
-#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT)
-
-#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22
-#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT)
-
-#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23
-#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \
- (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT)
-
-#define I40IWQPC_TCPSTATE_SHIFT 28
-#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT)
-
-#define I40IWQPC_RCVSCALE_SHIFT 32
-#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT)
-
-#define I40IWQPC_SNDSCALE_SHIFT 40
-#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT)
-
-#define I40IWQPC_PDIDX_SHIFT 48
-#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT)
-
-#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16
-#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \
- (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT)
-
-#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24
-#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \
- (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT)
-
-#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0
-#define I40IWQPC_TIMESTAMP_RECENT_MASK \
- (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT)
-
-#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32
-#define I40IWQPC_TIMESTAMP_AGE_MASK \
- (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT)
-
-#define I40IWQPC_SNDNXT_SHIFT 0
-#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT)
-
-#define I40IWQPC_SNDWND_SHIFT 32
-#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT)
-
-#define I40IWQPC_RCVNXT_SHIFT 0
-#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT)
-
-#define I40IWQPC_RCVWND_SHIFT 32
-#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT)
-
-#define I40IWQPC_SNDMAX_SHIFT 0
-#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT)
-
-#define I40IWQPC_SNDUNA_SHIFT 32
-#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT)
-
-#define I40IWQPC_SRTT_SHIFT 0
-#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT)
-
-#define I40IWQPC_RTTVAR_SHIFT 32
-#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT)
-
-#define I40IWQPC_SSTHRESH_SHIFT 0
-#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT)
-
-#define I40IWQPC_CWND_SHIFT 32
-#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT)
-
-#define I40IWQPC_SNDWL1_SHIFT 0
-#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT)
-
-#define I40IWQPC_SNDWL2_SHIFT 32
-#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT)
-
-#define I40IWQPC_ERR_RQ_IDX_SHIFT 32
-#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT)
-
-#define I40IWQPC_MAXSNDWND_SHIFT 0
-#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT)
-
-#define I40IWQPC_REXMIT_THRESH_SHIFT 48
-#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT)
-
-#define I40IWQPC_TXCQNUM_SHIFT 0
-#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT)
-
-#define I40IWQPC_RXCQNUM_SHIFT 32
-#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
-
-#define I40IWQPC_STAT_INDEX_SHIFT 0
-#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
-
-#define I40IWQPC_Q2ADDR_SHIFT 0
-#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
-
-#define I40IWQPC_LASTBYTESENT_SHIFT 0
-#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
-
-#define I40IWQPC_SRQID_SHIFT 32
-#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT)
-
-#define I40IWQPC_ORDSIZE_SHIFT 0
-#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT)
-
-#define I40IWQPC_IRDSIZE_SHIFT 16
-#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT)
-
-#define I40IWQPC_WRRDRSPOK_SHIFT 20
-#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT)
-
-#define I40IWQPC_RDOK_SHIFT 21
-#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT)
-
-#define I40IWQPC_SNDMARKERS_SHIFT 22
-#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT)
-
-#define I40IWQPC_BINDEN_SHIFT 23
-#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT)
-
-#define I40IWQPC_FASTREGEN_SHIFT 24
-#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT)
-
-#define I40IWQPC_PRIVEN_SHIFT 25
-#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
-
-#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
-#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
-
-#define I40IWQPC_IWARPMODE_SHIFT 28
-#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
-
-#define I40IWQPC_RCVMARKERS_SHIFT 29
-#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT)
-
-#define I40IWQPC_ALIGNHDRS_SHIFT 30
-#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT)
-
-#define I40IWQPC_RCVNOMPACRC_SHIFT 31
-#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT)
-
-#define I40IWQPC_RCVMARKOFFSET_SHIFT 33
-#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT)
-
-#define I40IWQPC_SNDMARKOFFSET_SHIFT 48
-#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT)
-
-#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPC_SQTPHVAL_SHIFT 0
-#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT)
-
-#define I40IWQPC_RQTPHVAL_SHIFT 8
-#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT)
-
-#define I40IWQPC_QSHANDLE_SHIFT 16
-#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT)
-
-#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32
-#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \
- I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT)
-
-#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0
-#define I40IWQPC_LOCAL_IPADDR3_MASK \
- (0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT)
-
-#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32
-#define I40IWQPC_LOCAL_IPADDR2_MASK \
- (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT)
-
-#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0
-#define I40IWQPC_LOCAL_IPADDR1_MASK \
- (0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT)
-
-#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32
-#define I40IWQPC_LOCAL_IPADDR0_MASK \
- (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
-
-/* wqe sizing, assuming 32 bytes per wqe quanta */
-#define I40IW_QP_SW_MIN_WQSIZE 4 /* in WRs */
-#define I40IW_SQ_RSVD 2
-#define I40IW_RQ_RSVD 1
-#define I40IW_MAX_QUANTAS_PER_WR 2
-#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048
-#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384
-#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1)
-
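-/*
- * Worked example of the sizing above: at the maximum of
- * I40IW_MAX_QUANTAS_PER_WR = 2 quantas (32-byte slots) per WR, a
- * 2048-quanta SQ holds at most (2048 / 2) - 1 = 1023 WRs; the -1
- * leaves one slot unused, consistent with I40IW_RING_FULL_ERR below,
- * which treats an occupancy of size - 1 as full.
- */
-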
-#define I40IWQP_OP_RDMA_WRITE 0
-#define I40IWQP_OP_RDMA_READ 1
-#define I40IWQP_OP_RDMA_SEND 3
-#define I40IWQP_OP_RDMA_SEND_INV 4
-#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5
-#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6
-#define I40IWQP_OP_BIND_MW 8
-#define I40IWQP_OP_FAST_REGISTER 9
-#define I40IWQP_OP_LOCAL_INVALIDATE 10
-#define I40IWQP_OP_RDMA_READ_LOC_INV 11
-#define I40IWQP_OP_NOP 12
-
-#define I40IW_RSVD_SHIFT 41
-#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT)
-
-/* iwarp QP SQ WQE common fields */
-#define I40IWQPSQ_OPCODE_SHIFT 32
-#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT)
-
-#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38
-#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT)
-
-#define I40IWQPSQ_STREAMMODE_SHIFT 58
-#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)
-
-#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59
-#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT)
-
-#define I40IWQPSQ_READFENCE_SHIFT 60
-#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT)
-
-#define I40IWQPSQ_LOCALFENCE_SHIFT 61
-#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT)
-
-#define I40IWQPSQ_SIGCOMPL_SHIFT 62
-#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT)
-
-#define I40IWQPSQ_VALID_SHIFT 63
-#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT)
-
-#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPSQ_FRAG_LEN_SHIFT 0
-#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT)
-
-#define I40IWQPSQ_FRAG_STAG_SHIFT 32
-#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT)
-
-#define I40IWQPSQ_REMSTAGINV_SHIFT 0
-#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT)
-
-#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57
-#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT)
-
-#define I40IWQPSQ_INLINEDATALEN_SHIFT 48
-#define I40IWQPSQ_INLINEDATALEN_MASK \
- (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT)
-
-/* iwarp send with push mode */
-#define I40IWQPSQ_WQDESCIDX_SHIFT 0
-#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT)
-
-/* rdma write */
-#define I40IWQPSQ_REMSTAG_SHIFT 0
-#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT)
-
-#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK
-
-/* memory window */
-#define I40IWQPSQ_STAGRIGHTS_SHIFT 48
-#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT)
-
-#define I40IWQPSQ_VABASEDTO_SHIFT 53
-#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT)
-
-#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0
-#define I40IWQPSQ_PARENTMRSTAG_MASK \
- (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT)
-
-#define I40IWQPSQ_MWSTAG_SHIFT 32
-#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT)
-
-#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK
-
-/* Local Invalidate */
-#define I40IWQPSQ_LOCSTAG_SHIFT 32
-#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT)
-
-/* Fast Register */
-#define I40IWQPSQ_STAGKEY_SHIFT 0
-#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT)
-
-#define I40IWQPSQ_STAGINDEX_SHIFT 8
-#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT)
-
-#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43
-#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT)
-
-#define I40IWQPSQ_LPBLSIZE_SHIFT 44
-#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT)
-
-#define I40IWQPSQ_HPAGESIZE_SHIFT 46
-#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT)
-
-#define I40IWQPSQ_STAGLEN_SHIFT 0
-#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT)
-
-#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48
-#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \
- (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT)
-
-#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0
-#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \
- (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT)
-
-#define I40IWQPSQ_PBLADDR_SHIFT 12
-#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT)
-
-/* iwarp QP RQ WQE common fields */
-#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT
-#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK
-
-#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT
-#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK
-
-#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK
-
-#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT
-#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK
-
-#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT
-#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK
-
-#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT
-#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK
-
-/* Query FPM CQP buf */
-#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0
-#define I40IW_QUERY_FPM_MAX_QPS_MASK \
- (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)
-
-#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0
-#define I40IW_QUERY_FPM_MAX_CQS_MASK \
- (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)
-
-#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0
-#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \
- (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT)
-
-#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32
-#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \
- (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT)
-
-#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0
-#define I40IW_QUERY_FPM_MAX_CEQS_MASK \
- (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT)
-
-#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32
-#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \
- (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT)
-
-#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32
-#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \
- (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT)
-
-#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16
-#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \
- (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT)
-
-#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32
-#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \
-	(0xffffULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT)
-
-/* Static HMC pages allocated buf */
-#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0
-#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \
- (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT)
-
-#define I40IW_HW_PAGE_SIZE 4096
-#define I40IW_DONE_COUNT 1000
-#define I40IW_SLEEP_COUNT 10
-
-enum {
- I40IW_QUEUES_ALIGNMENT_MASK = (128 - 1),
- I40IW_AEQ_ALIGNMENT_MASK = (256 - 1),
- I40IW_Q2_ALIGNMENT_MASK = (256 - 1),
- I40IW_CEQ_ALIGNMENT_MASK = (256 - 1),
- I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
- I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
- I40IW_SHADOWAREA_MASK = (128 - 1),
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
-};
-
-enum i40iw_alignment {
- I40IW_CQP_ALIGNMENT = 0x200,
- I40IW_AEQ_ALIGNMENT = 0x100,
- I40IW_CEQ_ALIGNMENT = 0x100,
- I40IW_CQ0_ALIGNMENT = 0x100,
- I40IW_SD_BUF_ALIGNMENT = 0x80,
- I40IW_FEATURE_BUF_ALIGNMENT = 0x8
-};
-
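-/*
- * The masks above are "alignment - 1" for the power-of-two alignments,
- * so an alignment check is typically spelled as below (hypothetical
- * example; pa assumed to be a dma address):
- *
- *	if (pa & I40IW_QUEUES_ALIGNMENT_MASK)
- *		return I40IW_ERR_PARAM;
- */
-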
-#define I40IW_WQE_SIZE_64 64
-
-#define I40IW_QP_WQE_MIN_SIZE 32
-#define I40IW_QP_WQE_MAX_SIZE 128
-
-#define I40IW_UPDATE_SD_BUF_SIZE 128
-
-#define I40IW_CQE_QTYPE_RQ 0
-#define I40IW_CQE_QTYPE_SQ 1
-
-#define I40IW_RING_INIT(_ring, _size) \
- { \
- (_ring).head = 0; \
- (_ring).tail = 0; \
- (_ring).size = (_size); \
- }
-#define I40IW_RING_GETSIZE(_ring) ((_ring).size)
-#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head)
-#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail)
-
-#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \
- { \
- register u32 size; \
- size = (_ring).size; \
- if (!I40IW_RING_FULL_ERR(_ring)) { \
- (_ring).head = ((_ring).head + 1) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = I40IW_ERR_RING_FULL; \
- } \
- }
-
-#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
- { \
- register u32 size; \
- size = (_ring).size; \
- if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \
- (_ring).head = ((_ring).head + (_count)) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = I40IW_ERR_RING_FULL; \
- } \
- }
-
-#define I40IW_RING_MOVE_TAIL(_ring) \
- (_ring).tail = ((_ring).tail + 1) % (_ring).size
-
-#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \
- (_ring).head = ((_ring).head + 1) % (_ring).size
-
-#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
- (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
-
-#define I40IW_RING_SET_TAIL(_ring, _pos) \
- (_ring).tail = (_pos) % (_ring).size
-
-#define I40IW_RING_FULL_ERR(_ring) \
- ( \
- (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \
- )
-
-#define I40IW_ERR_RING_FULL2(_ring) \
- ( \
- (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \
- )
-
-#define I40IW_ERR_RING_FULL3(_ring) \
- ( \
- (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \
- )
-
-#define I40IW_RING_MORE_WORK(_ring) \
- ( \
- (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \
- )
-
-#define I40IW_RING_WORK_AVAILABLE(_ring) \
- ( \
- (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
- )
-
-#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \
- ( \
- ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \
- )
-
-#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
- { \
- index = I40IW_RING_GETCURRENT_HEAD(_ring); \
- I40IW_RING_MOVE_HEAD(_ring, _retcode); \
- }
-
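-/*
- * Sketch of how the ring macros compose (hypothetical helper; assumes
- * the head/tail/size layout of struct i40iw_ring from i40iw_user.h).
- * Occupancy is (head - tail) mod size, and one slot is sacrificed so
- * that full (size - 1 in flight) is distinguishable from empty.
- */
-static inline u32 i40iw_sketch_claim_wqe(struct i40iw_ring *ring)
-{
-	enum i40iw_status_code ret = 0;
-	u32 wqe_idx;
-
-	/* remember the slot being claimed, then advance head by one */
-	I40IW_ATOMIC_RING_MOVE_HEAD(*ring, wqe_idx, ret);
-	if (ret == I40IW_ERR_RING_FULL)
-		return 0xffffffff;	/* hypothetical "no slot" marker */
-	return wqe_idx;
-}
-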
-/* Async Event codes */
-#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102
-#define I40IW_AE_AMP_INVALID_STAG 0x0103
-#define I40IW_AE_AMP_BAD_QP 0x0104
-#define I40IW_AE_AMP_BAD_PD 0x0105
-#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106
-#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107
-#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define I40IW_AE_AMP_TO_WRAP 0x010a
-#define I40IW_AE_AMP_FASTREG_SHARED 0x010b
-#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d
-#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f
-#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111
-#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define I40IW_AE_BAD_CLOSE 0x0201
-#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
-#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c
-#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define I40IW_AE_STAG_ZERO_INVALID 0x0206
-#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
-#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
-#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
-#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307
-#define I40IW_AE_DDP_NO_L_BIT 0x0308
-#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
-#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define I40IW_AE_STALE_ARP_ENTRY 0x0403
-#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
-#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
-#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
-#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
-#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
-#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define I40IW_AE_LLP_SYN_RECEIVED 0x0508
-#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d
-#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520
-#define I40IW_AE_RESET_SENT 0x0601
-#define I40IW_AE_TERMINATE_SENT 0x0602
-#define I40IW_AE_RESET_NOT_SENT 0x0603
-#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
-#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
-
-#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
-#define OP_CEQ_DESTROY 2
-#define OP_AEQ_DESTROY 3
-#define OP_DELETE_ARP_CACHE_ENTRY 4
-#define OP_MANAGE_APBVT_ENTRY 5
-#define OP_CEQ_CREATE 6
-#define OP_AEQ_CREATE 7
-#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8
-#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9
-#define OP_MANAGE_QHASH_TABLE_ENTRY 10
-#define OP_QP_MODIFY 11
-#define OP_QP_UPLOAD_CONTEXT 12
-#define OP_CQ_CREATE 13
-#define OP_CQ_DESTROY 14
-#define OP_QP_CREATE 15
-#define OP_QP_DESTROY 16
-#define OP_ALLOC_STAG 17
-#define OP_MR_REG_NON_SHARED 18
-#define OP_DEALLOC_STAG 19
-#define OP_MW_ALLOC 20
-#define OP_QP_FLUSH_WQES 21
-#define OP_ADD_ARP_CACHE_ENTRY 22
-#define OP_UPDATE_PE_SDS 23
-#define OP_MANAGE_HMC_PM_FUNC_TABLE 24
-#define OP_SUSPEND 25
-#define OP_RESUME 26
-#define OP_MANAGE_VF_PBLE_BP 27
-#define OP_QUERY_FPM_VALUES 28
-#define OP_COMMIT_FPM_VALUES 29
-#define OP_REQUESTED_COMMANDS 30
-#define OP_COMPLETED_COMMANDS 31
-#define OP_GEN_AE 32
-#define OP_QUERY_RDMA_FEATURES 33
-#define OP_SIZE_CQP_STAT_ARRAY 34
-
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
deleted file mode 100644
index b44bfc1d239b..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c
+++ /dev/null
@@ -1,821 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_status.h"
-#include "i40iw_hmc.h"
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include "i40iw_vf.h"
-#include "i40iw_virtchnl.h"
-
-/**
- * i40iw_find_sd_index_limit - finds segment descriptor index limit
- * @hmc_info: pointer to the HMC configuration information structure
- * @type: type of HMC resources we're searching
- * @idx: starting index for the object
- * @cnt: number of objects we're trying to create
- * @sd_idx: pointer to return index of the segment descriptor in question
- * @sd_limit: pointer to return the maximum number of segment descriptors
- *
- * This function calculates the segment descriptor index and index limit
- * for the resource defined by i40iw_hmc_rsrc_type.
- */
-static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
- u32 type,
- u32 idx,
- u32 cnt,
- u32 *sd_idx,
- u32 *sd_limit)
-{
- u64 fpm_addr, fpm_limit;
-
-	fpm_addr = hmc_info->hmc_obj[type].base +
- hmc_info->hmc_obj[type].size * idx;
- fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
- *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
- *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
- *sd_limit += 1;
-}
-
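-/*
- * Worked example of the arithmetic above, assuming the usual
- * I40IW_HMC_DIRECT_BP_SIZE of 2MB (0x200000): an object type with base
- * 0x300000 and size 0x1000, queried at idx 0 for cnt 0x200, gives
- * fpm_addr = 0x300000 and fpm_limit = 0x500000, so sd_idx = 1 and
- * sd_limit = (0x4fffff / 0x200000) + 1 = 3, i.e. segment descriptors
- * 1 and 2 back the range.
- */
-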
-/**
- * i40iw_find_pd_index_limit - finds page descriptor index limit
- * @hmc_info: pointer to the HMC configuration information struct
- * @type: HMC resource type we're examining
- * @idx: starting index for the object
- * @cnt: number of objects we're trying to create
- * @pd_idx: pointer to return page descriptor index
- * @pd_limit: pointer to return page descriptor index limit
- *
- * Calculates the page descriptor index and index limit for the resource
- * defined by i40iw_hmc_rsrc_type.
- */
-static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
- u32 type,
- u32 idx,
- u32 cnt,
- u32 *pd_idx,
- u32 *pd_limit)
-{
- u64 fpm_adr, fpm_limit;
-
- fpm_adr = hmc_info->hmc_obj[type].base +
- hmc_info->hmc_obj[type].size * idx;
-	fpm_limit = fpm_adr + hmc_info->hmc_obj[type].size * cnt;
-	*pd_idx = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);
-	*pd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
-	*pd_limit += 1;
-}
-
-/**
- * i40iw_set_sd_entry - setup entry for sd programming
- * @pa: physical addr
- * @idx: sd index
- * @type: paged or direct sd
- * @entry: sd entry ptr
- */
-static inline void i40iw_set_sd_entry(u64 pa,
- u32 idx,
- enum i40iw_sd_entry_type type,
- struct update_sd_entry *entry)
-{
- entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
- (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
- entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
-}
-
-/**
- * i40iw_clr_sd_entry - setup entry for sd clear
- * @idx: sd index
- * @type: paged or direct sd
- * @entry: sd entry ptr
- */
-static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
- struct update_sd_entry *entry)
-{
- entry->data = (I40IW_HMC_MAX_BP_COUNT <<
- I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
- (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
- entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
-}
-
-/**
- * i40iw_hmc_sd_one - setup 1 sd entry for cqp
- * @dev: pointer to the device structure
- * @hmc_fn_id: hmc's function id
- * @pa: physical addr
- * @sd_idx: sd index
- * @type: paged or direct sd
- * @setsd: flag to set or clear sd
- */
-enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
- u8 hmc_fn_id,
- u64 pa, u32 sd_idx,
- enum i40iw_sd_entry_type type,
- bool setsd)
-{
- struct i40iw_update_sds_info sdinfo;
-
- sdinfo.cnt = 1;
- sdinfo.hmc_fn_id = hmc_fn_id;
- if (setsd)
- i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
- else
- i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
-
- return dev->cqp->process_cqp_sds(dev, &sdinfo);
-}
-
-/**
- * i40iw_hmc_sd_grp - setup group of sd entries for cqp
- * @dev: pointer to the device structure
- * @hmc_info: pointer to the HMC configuration information struct
- * @sd_index: sd index
- * @sd_cnt: number of sd entries
- * @setsd: flag to set or clear sd
- */
-static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_info *hmc_info,
- u32 sd_index,
- u32 sd_cnt,
- bool setsd)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
- struct i40iw_update_sds_info sdinfo;
- u64 pa;
- u32 i;
- enum i40iw_status_code ret_code = 0;
-
- memset(&sdinfo, 0, sizeof(sdinfo));
- sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
- for (i = sd_index; i < sd_index + sd_cnt; i++) {
- sd_entry = &hmc_info->sd_table.sd_entry[i];
- if (!sd_entry ||
- (!sd_entry->valid && setsd) ||
- (sd_entry->valid && !setsd))
- continue;
- if (setsd) {
- pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
- sd_entry->u.pd_table.pd_page_addr.pa :
- sd_entry->u.bp.addr.pa;
- i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
- &sdinfo.entry[sdinfo.cnt]);
- } else {
- i40iw_clr_sd_entry(i, sd_entry->entry_type,
- &sdinfo.entry[sdinfo.cnt]);
- }
- sdinfo.cnt++;
- if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
- ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
- ret_code);
- return ret_code;
- }
- sdinfo.cnt = 0;
- }
- }
- if (sdinfo.cnt)
- ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
-
- return ret_code;
-}
-
-/**
- * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
- * @dev: pointer to the device structure
- * @hmc_fn_id: hmc's function id
- */
-struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
-{
- struct i40iw_vfdev *vf_dev = NULL;
- u16 idx;
-
- for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
- if (dev->vf_dev[idx] &&
- ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
- vf_dev = dev->vf_dev[idx];
- break;
- }
- }
- return vf_dev;
-}
-
-/**
- * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
- * @dev: pointer to the device structure
- * @hmc_fn_id: hmc's function id
- */
-struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
- u8 hmc_fn_id)
-{
- struct i40iw_hmc_info *hmc_info = NULL;
- u16 idx;
-
- for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
- if (dev->vf_dev[idx] &&
- ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
- hmc_info = &dev->vf_dev[idx]->hmc_info;
- break;
- }
- }
- return hmc_info;
-}
-
-/**
- * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
- * @dev: pointer to the device structure
- * @info: create obj info
- */
-static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_create_obj_info *info)
-{
- if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
-
- if ((info->start_idx + info->count) >
- info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
-
- if (!info->add_sd_cnt)
- return 0;
-
- return i40iw_hmc_sd_grp(dev, info->hmc_info,
- info->hmc_info->sd_indexes[0],
- info->add_sd_cnt, true);
-}
-
-/**
- * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
- * @dev: pointer to the device structure
- * @info: pointer to i40iw_hmc_iw_create_obj_info struct
- *
- * This will allocate memory for PDs and backing pages and populate
- * the sd and pd entries.
- */
-enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_create_obj_info *info)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
- u32 sd_idx, sd_lmt;
- u32 pd_idx = 0, pd_lmt = 0;
- u32 pd_idx1 = 0, pd_lmt1 = 0;
- u32 i, j;
- bool pd_error = false;
- enum i40iw_status_code ret_code = 0;
-
- if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
- return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
-
- if ((info->start_idx + info->count) >
- info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
- __func__, info->rsrc_type, info->start_idx, info->count,
- info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
- }
-
- if (!dev->is_pf)
- return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
-
- i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
- info->start_idx, info->count,
- &sd_idx, &sd_lmt);
- if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
- sd_lmt > info->hmc_info->sd_table.sd_cnt) {
- return I40IW_ERR_INVALID_SD_INDEX;
- }
- i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
- info->start_idx, info->count, &pd_idx, &pd_lmt);
-
- for (j = sd_idx; j < sd_lmt; j++) {
- ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
- j,
- info->entry_type,
- I40IW_HMC_DIRECT_BP_SIZE);
- if (ret_code)
- goto exit_sd_error;
- sd_entry = &info->hmc_info->sd_table.sd_entry[j];
-
- if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
- ((dev->hmc_info == info->hmc_info) &&
- (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
- pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
- pd_lmt1 = min(pd_lmt,
- (j + 1) * I40IW_HMC_MAX_BP_COUNT);
- for (i = pd_idx1; i < pd_lmt1; i++) {
- /* update the pd table entry */
- ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
- i, NULL);
- if (ret_code) {
- pd_error = true;
- break;
- }
- }
- if (pd_error) {
- while (i && (i > pd_idx1)) {
- i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
- info->is_pf);
- i--;
- }
- }
- }
- if (sd_entry->valid)
- continue;
-
- info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
- info->add_sd_cnt++;
- sd_entry->valid = true;
- }
- return i40iw_hmc_finish_add_sd_reg(dev, info);
-
-exit_sd_error:
- while (j && (j > sd_idx)) {
- sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
- switch (sd_entry->entry_type) {
- case I40IW_SD_TYPE_PAGED:
- pd_idx1 = max(pd_idx,
- (j - 1) * I40IW_HMC_MAX_BP_COUNT);
- pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
- for (i = pd_idx1; i < pd_lmt1; i++)
- i40iw_prep_remove_pd_page(info->hmc_info, i);
- break;
- case I40IW_SD_TYPE_DIRECT:
- i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
- break;
- default:
- ret_code = I40IW_ERR_INVALID_SD_TYPE;
- break;
- }
- j--;
- }
-
- return ret_code;
-}
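-
-/*
- * Hypothetical caller sketch for i40iw_sc_create_hmc_obj(): back the
- * first 64 QP objects with direct segment descriptors.  Only fields
- * referenced above are set; the helper name and the counts are
- * illustrative, not taken from the driver.
- */
-static enum i40iw_status_code i40iw_sketch_back_qps(struct i40iw_sc_dev *dev)
-{
-	struct i40iw_hmc_create_obj_info info = {};
-
-	info.hmc_info = dev->hmc_info;
-	info.rsrc_type = I40IW_HMC_IW_QP;
-	info.entry_type = I40IW_SD_TYPE_DIRECT;
-	info.start_idx = 0;
-	info.count = 64;
-	info.is_pf = dev->is_pf;
-	return i40iw_sc_create_hmc_obj(dev, &info);
-}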
-
-/**
- * i40iw_finish_del_sd_reg - delete sd entries for objects
- * @dev: pointer to the device structure
- * @info: delete obj info
- * @reset: true if called before reset
- */
-static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_del_obj_info *info,
- bool reset)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
- enum i40iw_status_code ret_code = 0;
- u32 i, sd_idx;
- struct i40iw_dma_mem *mem;
-
- if (dev->is_pf && !reset)
- ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
- info->hmc_info->sd_indexes[0],
- info->del_sd_cnt, false);
-
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
-
- for (i = 0; i < info->del_sd_cnt; i++) {
- sd_idx = info->hmc_info->sd_indexes[i];
- sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
- if (!sd_entry)
- continue;
- mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
- &sd_entry->u.pd_table.pd_page_addr :
- &sd_entry->u.bp.addr;
-
- if (!mem || !mem->va)
- i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
- else
- i40iw_free_dma_mem(dev->hw, mem);
- }
- return ret_code;
-}
-
-/**
- * i40iw_sc_del_hmc_obj - remove pe hmc objects
- * @dev: pointer to the device structure
- * @info: pointer to i40iw_hmc_del_obj_info struct
- * @reset: true if called before reset
- *
- * This will de-populate the SDs and PDs. It frees
- * the memory for PDs and backing storage. After this function returns,
- * the caller should deallocate any memory allocated previously for
- * book-keeping information about PDs and backing storage.
- */
-enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_del_obj_info *info,
- bool reset)
-{
- struct i40iw_hmc_pd_table *pd_table;
- u32 sd_idx, sd_lmt;
- u32 pd_idx, pd_lmt, rel_pd_idx;
- u32 i, j;
- enum i40iw_status_code ret_code = 0;
-
- if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
- __func__, info->start_idx, info->rsrc_type,
- info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
- }
-
- if ((info->start_idx + info->count) >
- info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
- __func__, info->start_idx, info->count,
- info->rsrc_type,
- info->hmc_info->hmc_obj[info->rsrc_type].cnt);
- return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
- }
- if (!dev->is_pf) {
- ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
- info->count);
- if (info->rsrc_type != I40IW_HMC_IW_PBLE)
- return ret_code;
- }
-
- i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
- info->start_idx, info->count, &pd_idx, &pd_lmt);
-
- for (j = pd_idx; j < pd_lmt; j++) {
- sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
-
- if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
- I40IW_SD_TYPE_PAGED)
- continue;
-
- rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
- pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
- if (pd_table->pd_entry[rel_pd_idx].valid) {
- ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
- info->is_pf);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
- return ret_code;
- }
- }
- }
-
- i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
- info->start_idx, info->count, &sd_idx, &sd_lmt);
- if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
- sd_lmt > info->hmc_info->sd_table.sd_cnt) {
- i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
- return I40IW_ERR_INVALID_SD_INDEX;
- }
-
- for (i = sd_idx; i < sd_lmt; i++) {
- if (!info->hmc_info->sd_table.sd_entry[i].valid)
- continue;
- switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
- case I40IW_SD_TYPE_DIRECT:
- ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
- if (!ret_code) {
- info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
- info->del_sd_cnt++;
- }
- break;
- case I40IW_SD_TYPE_PAGED:
- ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
- if (!ret_code) {
- info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
- info->del_sd_cnt++;
- }
- break;
- default:
- break;
- }
- }
- return i40iw_finish_del_sd_reg(dev, info, reset);
-}
-
-/**
- * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
- * @hw: pointer to our hw struct
- * @hmc_info: pointer to the HMC configuration information struct
- * @sd_index: segment descriptor index to manipulate
- * @type: what type of segment descriptor we're manipulating
- * @direct_mode_sz: size to alloc in direct mode
- */
-enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info,
- u32 sd_index,
- enum i40iw_sd_entry_type type,
- u64 direct_mode_sz)
-{
- enum i40iw_status_code ret_code = 0;
- struct i40iw_hmc_sd_entry *sd_entry;
- bool dma_mem_alloc_done = false;
- struct i40iw_dma_mem mem;
- u64 alloc_len;
-
- sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
- if (!sd_entry->valid) {
- if (type == I40IW_SD_TYPE_PAGED)
- alloc_len = I40IW_HMC_PAGED_BP_SIZE;
- else
- alloc_len = direct_mode_sz;
-
- /* allocate a 4K pd page or 2M backing page */
- ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
- I40IW_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
- dma_mem_alloc_done = true;
- if (type == I40IW_SD_TYPE_PAGED) {
- ret_code = i40iw_allocate_virt_mem(hw,
- &sd_entry->u.pd_table.pd_entry_virt_mem,
- sizeof(struct i40iw_hmc_pd_entry) * 512);
- if (ret_code)
- goto exit;
- sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
- sd_entry->u.pd_table.pd_entry_virt_mem.va;
-
- memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
- } else {
- memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
- sd_entry->u.bp.sd_pd_index = sd_index;
- }
-
- hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
-
- I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
- }
- if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
- I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
-exit:
- if (ret_code)
- if (dma_mem_alloc_done)
- i40iw_free_dma_mem(hw, &mem);
-
- return ret_code;
-}
-
-/**
- * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
- * @hw: pointer to our HW structure
- * @hmc_info: pointer to the HMC configuration information structure
- * @pd_index: which page descriptor index to manipulate
- * @rsrc_pg: if not NULL, use the preallocated page instead of allocating a new one.
- *
- * This function:
- * 1. Initializes the pd entry
- * 2. Adds the pd_entry to the pd_table
- * 3. Marks the entry valid in the i40iw_hmc_pd_entry structure
- * 4. Initializes the pd_entry's ref count to 1
- * assumptions:
- * 1. The memory for the pd should be pinned down, physically contiguous,
- * aligned on a 4K boundary and zeroed.
- * 2. It should be 4K in size.
- */
-enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info,
- u32 pd_index,
- struct i40iw_dma_mem *rsrc_pg)
-{
- enum i40iw_status_code ret_code = 0;
- struct i40iw_hmc_pd_table *pd_table;
- struct i40iw_hmc_pd_entry *pd_entry;
- struct i40iw_dma_mem mem;
- struct i40iw_dma_mem *page = &mem;
- u32 sd_idx, rel_pd_idx;
- u64 *pd_addr;
- u64 page_desc;
-
- if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
- return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
-
- sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
- if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
- return 0;
-
- rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
- pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
- pd_entry = &pd_table->pd_entry[rel_pd_idx];
- if (!pd_entry->valid) {
- if (rsrc_pg) {
- pd_entry->rsrc_pg = true;
- page = rsrc_pg;
- } else {
- ret_code = i40iw_allocate_dma_mem(hw, page,
- I40IW_HMC_PAGED_BP_SIZE,
- I40IW_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- return ret_code;
- pd_entry->rsrc_pg = false;
- }
-
- memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
- pd_entry->bp.sd_pd_index = pd_index;
- pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
- page_desc = page->pa | 0x1;
-
- pd_addr = (u64 *)pd_table->pd_page_addr.va;
- pd_addr += rel_pd_idx;
-
- memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
-
- pd_entry->sd_index = sd_idx;
- pd_entry->valid = true;
- I40IW_INC_PD_REFCNT(pd_table);
- if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
- I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
- else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
- I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
- hmc_info->hmc_fn_id);
- }
- I40IW_INC_BP_REFCNT(&pd_entry->bp);
-
- return 0;
-}
-
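The | 0x1 in i40iw_add_pd_table_entry sets bit 0 of the physical address to mark the page descriptor valid before it is written into the PD page. A small sketch of that encoding, assuming bit 0 is the valid bit as the code above suggests (the constant name is mine):

    #include <stdint.h>
    #include <stdio.h>

    #define PD_VALID_BIT 0x1ULL /* assumed: bit 0 marks the entry live */

    static uint64_t encode_pd(uint64_t pa)
    {
        return pa | PD_VALID_BIT; /* mirrors page_desc = page->pa | 0x1 */
    }

    int main(void)
    {
        uint64_t pd_page[512] = { 0 }; /* one SD's worth of descriptors */
        unsigned int rel_pd_idx = 7;

        pd_page[rel_pd_idx] = encode_pd(0x12345000ULL); /* 4K-aligned pa */
        printf("pd[%u] = 0x%llx\n", rel_pd_idx,
               (unsigned long long)pd_page[rel_pd_idx]);
        return 0;
    }
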
-/**
- * i40iw_remove_pd_bp - remove a backing page from a page descriptor
- * @hw: pointer to our HW structure
- * @hmc_info: pointer to the HMC configuration information structure
- * @idx: the page index
- * @is_pf: distinguishes a VF from a PF
- *
- * This function:
- * 1. Marks the entry in pd table (for paged address mode) or in sd table
- * (for direct address mode) invalid.
- * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
- * 3. Decrements the ref count for the pd_entry
- * assumptions:
- * 1. Caller can deallocate the memory used by backing storage after this
- * function returns.
- */
-enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info,
- u32 idx,
- bool is_pf)
-{
- struct i40iw_hmc_pd_entry *pd_entry;
- struct i40iw_hmc_pd_table *pd_table;
- struct i40iw_hmc_sd_entry *sd_entry;
- u32 sd_idx, rel_pd_idx;
- struct i40iw_dma_mem *mem;
- u64 *pd_addr;
-
- sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
- rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
- if (sd_idx >= hmc_info->sd_table.sd_cnt)
- return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
-
- sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
- if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
- return I40IW_ERR_INVALID_SD_TYPE;
-
- pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
- pd_entry = &pd_table->pd_entry[rel_pd_idx];
- I40IW_DEC_BP_REFCNT(&pd_entry->bp);
- if (pd_entry->bp.ref_cnt)
- return 0;
-
- pd_entry->valid = false;
- I40IW_DEC_PD_REFCNT(pd_table);
- pd_addr = (u64 *)pd_table->pd_page_addr.va;
- pd_addr += rel_pd_idx;
- memset(pd_addr, 0, sizeof(u64));
- if (is_pf)
- I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
- else
- I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
- hmc_info->hmc_fn_id);
-
- if (!pd_entry->rsrc_pg) {
- mem = &pd_entry->bp.addr;
- if (!mem || !mem->va)
- return I40IW_ERR_PARAM;
- i40iw_free_dma_mem(hw, mem);
- }
- if (!pd_table->ref_cnt)
- i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
-
- return 0;
-}
-
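i40iw_remove_pd_bp only tears the backing page down once the last reference is gone; the early return keeps shared pages alive. A minimal userspace sketch of the same drop-ref-then-free shape (non-atomic, names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct page_entry {
        unsigned int ref_cnt;
        void *backing;
    };

    /* Drop one reference; free the backing page only when the last
     * user is gone, as i40iw_remove_pd_bp does above. */
    static int put_page_entry(struct page_entry *pe)
    {
        if (--pe->ref_cnt)
            return 0;          /* still referenced elsewhere */
        free(pe->backing);
        pe->backing = NULL;
        return 1;              /* slot can now be invalidated */
    }

    int main(void)
    {
        struct page_entry pe = { 2, malloc(4096) };

        printf("freed=%d\n", put_page_entry(&pe)); /* 0: a ref remains */
        printf("freed=%d\n", put_page_entry(&pe)); /* 1: last ref gone */
        return 0;
    }
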
-/**
- * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
- * @hmc_info: pointer to the HMC configuration information structure
- * @idx: the page index
- */
-enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
-
- sd_entry = &hmc_info->sd_table.sd_entry[idx];
- I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
- if (sd_entry->u.bp.ref_cnt)
- return I40IW_ERR_NOT_READY;
-
- I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
- sd_entry->valid = false;
-
- return 0;
-}
-
-/**
- * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
- * @hmc_info: pointer to the HMC configuration information structure
- * @idx: segment descriptor index to find the relevant page descriptor
- */
-enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
- u32 idx)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
-
- sd_entry = &hmc_info->sd_table.sd_entry[idx];
-
- if (sd_entry->u.pd_table.ref_cnt)
- return I40IW_ERR_NOT_READY;
-
- sd_entry->valid = false;
- I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
-
- return 0;
-}
-
-/**
- * i40iw_pf_init_vfhmc - initialize hmc_info for a vf driver instance
- * @vf_cnt_array: array of cnt values of iwarp hmc objects
- * @vf_hmc_fn_id: hmc function id for vf driver
- * @dev: pointer to i40iw_dev struct
- *
- * Called by pf driver to initialize hmc_info for vf driver instance.
- */
-enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
- u8 vf_hmc_fn_id,
- u32 *vf_cnt_array)
-{
- struct i40iw_hmc_info *hmc_info;
- enum i40iw_status_code ret_code = 0;
- u32 i;
-
- if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
- (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
- I40IW_MAX_PE_ENABLED_VF_COUNT)) {
- i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
- __func__, vf_hmc_fn_id);
- return I40IW_ERR_INVALID_HMCFN_ID;
- }
-
- ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
- if (ret_code)
- return ret_code;
-
- hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
-
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
- if (vf_cnt_array)
- hmc_info->hmc_obj[i].cnt =
- vf_cnt_array[i - I40IW_HMC_IW_QP];
- else
- hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
deleted file mode 100644
index 4c3fdd875621..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_HMC_H
-#define I40IW_HMC_H
-
-#include "i40iw_d.h"
-
-struct i40iw_hw;
-enum i40iw_status_code;
-
-#define I40IW_HMC_MAX_BP_COUNT 512
-#define I40IW_MAX_SD_ENTRIES 11
-#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA
-
-#define I40IW_HMC_INFO_SIGNATURE 0x484D5347
-#define I40IW_HMC_PD_CNT_IN_SD 512
-#define I40IW_HMC_DIRECT_BP_SIZE 0x200000
-#define I40IW_HMC_MAX_SD_COUNT 4096
-#define I40IW_HMC_PAGED_BP_SIZE 4096
-#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096
-#define I40IW_FIRST_VF_FPM_ID 16
-#define FPM_MULTIPLIER 1024
-
-#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
-#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
-#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
-
-#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
-#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
-#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
-
-/**
- * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
- * @hw: pointer to our hw struct
- * @sd_idx: segment descriptor index
- * @pd_idx: page descriptor index
- */
-#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
- i40iw_wr32((hw), I40E_PFHMC_PDINV, \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
-/**
- * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
- * @hw: pointer to our hw struct
- * @sd_idx: segment descriptor index
- * @pd_idx: page descriptor index
- * @hmc_fn_id: VF's function id
- */
-#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \
- ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
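Both PDINV macros build the register value by shifting each field into position and OR-ing the results before a single i40iw_wr32. A sketch of that composition; the shift values below are placeholders, not the real I40E_PFHMC_PDINV_* offsets:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field offsets; the real values come from the
     * I40E_PFHMC_PDINV_* definitions in the register header. */
    #define PMSDIDX_SHIFT      0
    #define PMSDPARTSEL_SHIFT 15
    #define PMPDIDX_SHIFT     16

    static uint32_t pdinv_value(uint32_t sd_idx, uint32_t pd_idx)
    {
        return (sd_idx << PMSDIDX_SHIFT) |
               (1u << PMSDPARTSEL_SHIFT) |   /* PF partition select */
               (pd_idx << PMPDIDX_SHIFT);
    }

    int main(void)
    {
        printf("PDINV = 0x%08x\n", pdinv_value(3, 42));
        return 0;
    }
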
-struct i40iw_hmc_obj_info {
- u64 base;
- u32 max_cnt;
- u32 cnt;
- u64 size;
-};
-
-enum i40iw_sd_entry_type {
- I40IW_SD_TYPE_INVALID = 0,
- I40IW_SD_TYPE_PAGED = 1,
- I40IW_SD_TYPE_DIRECT = 2
-};
-
-struct i40iw_hmc_bp {
- enum i40iw_sd_entry_type entry_type;
- struct i40iw_dma_mem addr;
- u32 sd_pd_index;
- u32 ref_cnt;
-};
-
-struct i40iw_hmc_pd_entry {
- struct i40iw_hmc_bp bp;
- u32 sd_index;
- bool rsrc_pg;
- bool valid;
-};
-
-struct i40iw_hmc_pd_table {
- struct i40iw_dma_mem pd_page_addr;
- struct i40iw_hmc_pd_entry *pd_entry;
- struct i40iw_virt_mem pd_entry_virt_mem;
- u32 ref_cnt;
- u32 sd_index;
-};
-
-struct i40iw_hmc_sd_entry {
- enum i40iw_sd_entry_type entry_type;
- bool valid;
-
- union {
- struct i40iw_hmc_pd_table pd_table;
- struct i40iw_hmc_bp bp;
- } u;
-};
-
-struct i40iw_hmc_sd_table {
- struct i40iw_virt_mem addr;
- u32 sd_cnt;
- u32 ref_cnt;
- struct i40iw_hmc_sd_entry *sd_entry;
-};
-
-struct i40iw_hmc_info {
- u32 signature;
- u8 hmc_fn_id;
- u16 first_sd_index;
-
- struct i40iw_hmc_obj_info *hmc_obj;
- struct i40iw_virt_mem hmc_obj_virt_mem;
- struct i40iw_hmc_sd_table sd_table;
- u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];
-};
-
-struct update_sd_entry {
- u64 cmd;
- u64 data;
-};
-
-struct i40iw_update_sds_info {
- u32 cnt;
- u8 hmc_fn_id;
- struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
-};
-
-struct i40iw_ccq_cqe_info;
-struct i40iw_hmc_fcn_info {
- void (*callback_fcn)(struct i40iw_sc_dev *, void *,
- struct i40iw_ccq_cqe_info *);
- void *cqp_callback_param;
- u32 vf_id;
- u16 iw_vf_idx;
- bool free_fcn;
-};
-
-enum i40iw_hmc_rsrc_type {
- I40IW_HMC_IW_QP = 0,
- I40IW_HMC_IW_CQ = 1,
- I40IW_HMC_IW_SRQ = 2,
- I40IW_HMC_IW_HTE = 3,
- I40IW_HMC_IW_ARP = 4,
- I40IW_HMC_IW_APBVT_ENTRY = 5,
- I40IW_HMC_IW_MR = 6,
- I40IW_HMC_IW_XF = 7,
- I40IW_HMC_IW_XFFL = 8,
- I40IW_HMC_IW_Q1 = 9,
- I40IW_HMC_IW_Q1FL = 10,
- I40IW_HMC_IW_TIMER = 11,
- I40IW_HMC_IW_FSIMC = 12,
- I40IW_HMC_IW_FSIAV = 13,
- I40IW_HMC_IW_PBLE = 14,
- I40IW_HMC_IW_MAX = 15,
-};
-
-struct i40iw_hmc_create_obj_info {
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_virt_mem add_sd_virt_mem;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
- u32 add_sd_cnt;
- enum i40iw_sd_entry_type entry_type;
- bool is_pf;
-};
-
-struct i40iw_hmc_del_obj_info {
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_virt_mem del_sd_virt_mem;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
- u32 del_sd_cnt;
- bool is_pf;
-};
-
-enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
- struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
-enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_create_obj_info *info);
-enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_del_obj_info *info,
- bool reset);
-enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
- u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
- bool setsd);
-enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
- struct i40iw_update_sds_info *info);
-struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
-struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
- u8 hmc_fn_id);
-enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info, u32 sd_index,
- enum i40iw_sd_entry_type type, u64 direct_mode_sz);
-enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info, u32 pd_index,
- struct i40iw_dma_mem *rsrc_pg);
-enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
- struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
-enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
-enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
-
-#define ENTER_SHARED_FUNCTION()
-#define EXIT_SHARED_FUNCTION()
-
-#endif /* I40IW_HMC_H */
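Throughout this header, a flat pd_index is split into a segment-descriptor index plus a page-descriptor slot within that segment using I40IW_HMC_PD_CNT_IN_SD (512 PDs per SD). A tiny worked example of that decomposition:

    #include <stdio.h>

    #define PD_CNT_IN_SD 512 /* I40IW_HMC_PD_CNT_IN_SD above */

    int main(void)
    {
        unsigned int pd_index = 1300;
        unsigned int sd_idx = pd_index / PD_CNT_IN_SD;     /* 2 */
        unsigned int rel_pd_idx = pd_index % PD_CNT_IN_SD; /* 276 */

        printf("pd %u lives in sd %u at slot %u\n",
               pd_index, sd_idx, rel_pd_idx);
        return 0;
    }
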
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
deleted file mode 100644
index d167ac10c751..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ /dev/null
@@ -1,851 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-
-#include "i40iw.h"
-
-/**
- * i40iw_initialize_hw_resources - initialize hw resource during open
- * @iwdev: iwarp device
- */
-u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
-{
- unsigned long num_pds;
- u32 resources_size;
- u32 max_mr;
- u32 max_qp;
- u32 max_cq;
- u32 arp_table_size;
- u32 mrdrvbits;
- void *resource_ptr;
-
- max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
- max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
- max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
- arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
- iwdev->max_cqe = 0xFFFFF;
- num_pds = I40IW_MAX_PDS;
- resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
- resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
- resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
- resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
- resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
- resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
- resources_size += sizeof(struct i40iw_qp **) * max_qp;
- iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
-
- if (!iwdev->mem_resources)
- return -ENOMEM;
-
- iwdev->max_qp = max_qp;
- iwdev->max_mr = max_mr;
- iwdev->max_cq = max_cq;
- iwdev->max_pd = num_pds;
- iwdev->arp_table_size = arp_table_size;
- iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
- resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
-
- iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- iwdev->allocated_qps = resource_ptr;
- iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
- iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
- iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
- iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
- iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
- set_bit(0, iwdev->allocated_mrs);
- set_bit(0, iwdev->allocated_qps);
- set_bit(0, iwdev->allocated_cqs);
- set_bit(0, iwdev->allocated_pds);
- set_bit(0, iwdev->allocated_arps);
-
- /* Following for ILQ/IEQ */
- set_bit(1, iwdev->allocated_qps);
- set_bit(1, iwdev->allocated_cqs);
- set_bit(1, iwdev->allocated_pds);
- set_bit(2, iwdev->allocated_cqs);
- set_bit(2, iwdev->allocated_pds);
-
- spin_lock_init(&iwdev->resource_lock);
- spin_lock_init(&iwdev->qptable_lock);
- /* stag index mask has a minimum of 14 bits */
- mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
- iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
- return 0;
-}
-
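i40iw_initialize_hw_resources sizes one kzalloc for every resource bitmap plus the qp table, then carves consecutive regions out of it with pointer arithmetic. A reduced userspace sketch of that carving, with calloc standing in for kzalloc:

    #include <stdio.h>
    #include <stdlib.h>

    #define LONG_BITS (8 * sizeof(unsigned long))
    #define TO_LONGS(n) (((n) + LONG_BITS - 1) / LONG_BITS)

    int main(void)
    {
        unsigned long max_qp = 1000, max_cq = 2000;
        unsigned long *blob, *qp_map, *cq_map;

        blob = calloc(TO_LONGS(max_qp) + TO_LONGS(max_cq),
                      sizeof(unsigned long));
        if (!blob)
            return 1;
        qp_map = blob;                      /* first bitmap */
        cq_map = &qp_map[TO_LONGS(max_qp)]; /* carved right after it */
        cq_map[0] |= 1UL;                   /* reserve index 0, as above */
        printf("cq bitmap starts %zu longs in\n", (size_t)(cq_map - blob));
        free(blob);
        return 0;
    }
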
-/**
- * i40iw_cqp_ce_handler - handle cqp completions
- * @iwdev: iwarp device
- * @arm: flag to arm after completions
- * @cq: cq for cqp completions
- */
-static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
-{
- struct i40iw_cqp_request *cqp_request;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- u32 cqe_count = 0;
- struct i40iw_ccq_cqe_info info;
- int ret;
-
- do {
- memset(&info, 0, sizeof(info));
- ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
- if (ret)
- break;
- cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
- if (info.error)
- i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
- info.op_code, info.maj_err_code, info.min_err_code);
- if (cqp_request) {
- cqp_request->compl_info.maj_err_code = info.maj_err_code;
- cqp_request->compl_info.min_err_code = info.min_err_code;
- cqp_request->compl_info.op_ret_val = info.op_ret_val;
- cqp_request->compl_info.error = info.error;
-
- if (cqp_request->waiting) {
- cqp_request->request_done = true;
- wake_up(&cqp_request->waitq);
- i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
- } else {
- if (cqp_request->callback_fcn)
- cqp_request->callback_fcn(cqp_request, 1);
- i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
- }
- }
-
- cqe_count++;
- } while (1);
-
- if (arm && cqe_count) {
- i40iw_process_bh(dev);
- dev->ccq_ops->ccq_arm(cq);
- }
-}
-
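i40iw_cqp_ce_handler delivers each completion along one of two paths: wake a synchronous waiter, or invoke the request's asynchronous callback. A minimal single-threaded sketch of that split, with a flag standing in for the kernel waitqueue:

    #include <stdio.h>

    struct request {
        int waiting;                      /* synchronous caller blocked? */
        int done;
        void (*callback)(struct request *);
    };

    /* Deliver one completion: flag a waiter awake, or run the async
     * callback. In the driver the flag is paired with wake_up() on
     * the request's waitqueue. */
    static void complete_request(struct request *req)
    {
        if (req->waiting)
            req->done = 1;
        else if (req->callback)
            req->callback(req);
    }

    static void on_done(struct request *req)
    {
        (void)req;
        printf("async completion ran\n");
    }

    int main(void)
    {
        struct request async_req = { 0, 0, on_done };
        struct request sync_req = { 1, 0, NULL };

        complete_request(&async_req);
        complete_request(&sync_req);
        printf("sync done=%d\n", sync_req.done);
        return 0;
    }
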
-/**
- * i40iw_iwarp_ce_handler - handle iwarp completions
- * @iwdev: iwarp device
- * @iwcq: iwarp cq receiving event
- */
-static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
- struct i40iw_sc_cq *iwcq)
-{
- struct i40iw_cq *i40iwcq = iwcq->back_cq;
-
- if (i40iwcq->ibcq.comp_handler)
- i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
- i40iwcq->ibcq.cq_context);
-}
-
-/**
- * i40iw_puda_ce_handler - handle puda completion events
- * @iwdev: iwarp device
- * @cq: puda completion q for event
- */
-static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
- struct i40iw_sc_cq *cq)
-{
- struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
- enum i40iw_status_code status;
- u32 compl_error;
-
- do {
- status = i40iw_puda_poll_completion(dev, cq, &compl_error);
- if (status == I40IW_ERR_QUEUE_EMPTY)
- break;
- if (status) {
- i40iw_pr_err("puda status = %d\n", status);
- break;
- }
- if (compl_error) {
- i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
- break;
- }
- } while (1);
-
- dev->ccq_ops->ccq_arm(cq);
-}
-
-/**
- * i40iw_process_ceq - handle ceq for completions
- * @iwdev: iwarp device
- * @ceq: ceq having cq for completion
- */
-void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_sc_ceq *sc_ceq;
- struct i40iw_sc_cq *cq;
- bool arm = true;
-
- sc_ceq = &ceq->sc_ceq;
- do {
- cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
- if (!cq)
- break;
-
- if (cq->cq_type == I40IW_CQ_TYPE_CQP)
- i40iw_cqp_ce_handler(iwdev, cq, arm);
- else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
- i40iw_iwarp_ce_handler(iwdev, cq);
- else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
- (cq->cq_type == I40IW_CQ_TYPE_IEQ))
- i40iw_puda_ce_handler(iwdev, cq);
- } while (1);
-}
-
-/**
- * i40iw_next_iw_state - modify qp state
- * @iwqp: iwarp qp to modify
- * @state: next state for qp
- * @del_hash: flag to remove the qp's quad hash entry
- * @term: flags controlling send of terminate and/or FIN
- * @termlen: length of the terminate message
- */
-void i40iw_next_iw_state(struct i40iw_qp *iwqp,
- u8 state,
- u8 del_hash,
- u8 term,
- u8 termlen)
-{
- struct i40iw_modify_qp_info info;
-
- memset(&info, 0, sizeof(info));
- info.next_iwarp_state = state;
- info.remove_hash_idx = del_hash;
- info.cq_num_valid = true;
- info.arp_cache_idx_valid = true;
- info.dont_send_term = true;
- info.dont_send_fin = true;
- info.termlen = termlen;
-
- if (term & I40IWQP_TERM_SEND_TERM_ONLY)
- info.dont_send_term = false;
- if (term & I40IWQP_TERM_SEND_FIN_ONLY)
- info.dont_send_fin = false;
- if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
- info.reset_tcp_conn = true;
- iwqp->hw_iwarp_state = state;
- i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
-}
-
-/**
- * i40iw_process_aeq - handle aeq events
- * @iwdev: iwarp device
- */
-void i40iw_process_aeq(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_aeq *aeq = &iwdev->aeq;
- struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
- struct i40iw_aeqe_info aeinfo;
- struct i40iw_aeqe_info *info = &aeinfo;
- int ret;
- struct i40iw_qp *iwqp = NULL;
- struct i40iw_sc_cq *cq = NULL;
- struct i40iw_cq *iwcq = NULL;
- struct i40iw_sc_qp *qp = NULL;
- struct i40iw_qp_host_ctx_info *ctx_info = NULL;
- unsigned long flags;
-
- u32 aeqcnt = 0;
-
- if (!sc_aeq->size)
- return;
-
- do {
- memset(info, 0, sizeof(*info));
- ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
- if (ret)
- break;
-
- aeqcnt++;
- i40iw_debug(dev, I40IW_DEBUG_AEQ,
- "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
- __func__, info->ae_id, info->qp, info->qp_cq_id);
- if (info->qp) {
- spin_lock_irqsave(&iwdev->qptable_lock, flags);
- iwqp = iwdev->qp_table[info->qp_cq_id];
- if (!iwqp) {
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- i40iw_debug(dev, I40IW_DEBUG_AEQ,
- "%s qp_id %d is already freed\n",
- __func__, info->qp_cq_id);
- continue;
- }
- i40iw_qp_add_ref(&iwqp->ibqp);
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- qp = &iwqp->sc_qp;
- spin_lock_irqsave(&iwqp->lock, flags);
- iwqp->hw_tcp_state = info->tcp_state;
- iwqp->hw_iwarp_state = info->iwarp_state;
- iwqp->last_aeq = info->ae_id;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- ctx_info = &iwqp->ctx_info;
- ctx_info->err_rq_idx_valid = true;
- } else {
- if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
- continue;
- }
-
- switch (info->ae_id) {
- case I40IW_AE_LLP_FIN_RECEIVED:
- if (qp->term_flags)
- break;
- if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
- iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
- if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
- (iwqp->ibqp_state == IB_QPS_RTS)) {
- i40iw_next_iw_state(iwqp,
- I40IW_QP_STATE_CLOSING, 0, 0, 0);
- i40iw_cm_disconn(iwqp);
- }
- iwqp->cm_id->add_ref(iwqp->cm_id);
- i40iw_schedule_cm_timer(iwqp->cm_node,
- (struct i40iw_puda_buf *)iwqp,
- I40IW_TIMER_TYPE_CLOSE, 1, 0);
- }
- break;
- case I40IW_AE_LLP_CLOSE_COMPLETE:
- if (qp->term_flags)
- i40iw_terminate_done(qp, 0);
- else
- i40iw_cm_disconn(iwqp);
- break;
- case I40IW_AE_BAD_CLOSE:
- case I40IW_AE_RESET_SENT:
- i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
- i40iw_cm_disconn(iwqp);
- break;
- case I40IW_AE_LLP_CONNECTION_RESET:
- if (atomic_read(&iwqp->close_timer_started))
- break;
- i40iw_cm_disconn(iwqp);
- break;
- case I40IW_AE_QP_SUSPEND_COMPLETE:
- i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
- break;
- case I40IW_AE_TERMINATE_SENT:
- i40iw_terminate_send_fin(qp);
- break;
- case I40IW_AE_LLP_TERMINATE_RECEIVED:
- i40iw_terminate_received(qp, info);
- break;
- case I40IW_AE_CQ_OPERATION_ERROR:
- i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
- info->ae_id);
- cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
- iwcq = (struct i40iw_cq *)cq->back_cq;
-
- if (iwcq->ibcq.event_handler) {
- struct ib_event ibevent;
-
- ibevent.device = iwcq->ibcq.device;
- ibevent.event = IB_EVENT_CQ_ERR;
- ibevent.element.cq = &iwcq->ibcq;
- iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
- }
- break;
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
- break;
- case I40IW_AE_PRIV_OPERATION_DENIED:
- case I40IW_AE_STAG_ZERO_INVALID:
- case I40IW_AE_IB_RREQ_AND_Q1_FULL:
- case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
- case I40IW_AE_DDP_UBE_INVALID_MO:
- case I40IW_AE_DDP_UBE_INVALID_QN:
- case I40IW_AE_DDP_NO_L_BIT:
- case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
- case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
- case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
- case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- case I40IW_AE_INVALID_ARP_ENTRY:
- case I40IW_AE_INVALID_TCP_OPTION_RCVD:
- case I40IW_AE_STALE_ARP_ENTRY:
- case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
- case I40IW_AE_LLP_SYN_RECEIVED:
- case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LCE_QP_CATASTROPHIC:
- case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
- case I40IW_AE_LCE_CQ_CATASTROPHIC:
- case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- ctx_info->err_rq_idx_valid = false;
- fallthrough;
- default:
- if (!info->sq && ctx_info->err_rq_idx_valid) {
- ctx_info->err_rq_idx = info->wqe_idx;
- ctx_info->tcp_info_valid = false;
- ctx_info->iwarp_info_valid = false;
- ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
- iwqp->host_ctx.va,
- ctx_info);
- }
- i40iw_terminate_connection(qp, info);
- break;
- }
- if (info->qp)
- i40iw_qp_rem_ref(&iwqp->ibqp);
- } while (1);
-
- if (aeqcnt)
- dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
-}
-
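The AEQ loop above looks the QP up and takes a reference while qptable_lock is held, so the QP cannot be freed between lookup and use. A userspace sketch of that lookup-then-ref pairing, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct qp {
        int refs;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qp *qp_table[16];

    /* Take a reference under the same lock that protects the table,
     * so the entry cannot vanish between lookup and use. */
    static struct qp *qp_get(unsigned int id)
    {
        struct qp *qp;

        pthread_mutex_lock(&table_lock);
        qp = qp_table[id];
        if (qp)
            qp->refs++;
        pthread_mutex_unlock(&table_lock);
        return qp;
    }

    int main(void)
    {
        static struct qp q = { 1 };

        qp_table[3] = &q;
        printf("refs after get: %d\n", qp_get(3)->refs); /* 2 */
        return 0;
    }
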
-/**
- * i40iw_cqp_manage_abvpt_cmd - send cqp command to manage apbvt entry
- * @iwdev: iwarp device
- * @accel_local_port: port for apbvt
- * @add_port: add or delete port
- */
-static enum i40iw_status_code
-i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
- u16 accel_local_port,
- bool add_port)
-{
- struct i40iw_apbvt_info *info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.manage_apbvt_entry.info;
-
- memset(info, 0, sizeof(*info));
- info->add = add_port;
- info->port = cpu_to_le16(accel_local_port);
-
- cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Manage APBVT entry fail");
-
- return status;
-}
-
-/**
- * i40iw_manage_apbvt - add or delete tcp port
- * @iwdev: iwarp device
- * @accel_local_port: port for apbvt
- * @add_port: add or delete port
- */
-enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
- u16 accel_local_port,
- bool add_port)
-{
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- enum i40iw_status_code status;
- unsigned long flags;
- bool in_use;
-
- /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
- * protect against race where add APBVT CQP can race ahead of the delete
- * APBVT for same port.
- */
- if (add_port) {
- spin_lock_irqsave(&cm_core->apbvt_lock, flags);
- in_use = __test_and_set_bit(accel_local_port,
- cm_core->ports_in_use);
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
- if (in_use)
- return 0;
- return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
- true);
- } else {
- spin_lock_irqsave(&cm_core->apbvt_lock, flags);
- in_use = i40iw_port_in_use(cm_core, accel_local_port);
- if (in_use) {
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
- return 0;
- }
- __clear_bit(accel_local_port, cm_core->ports_in_use);
- status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
- false);
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
- return status;
- }
-}
-
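i40iw_manage_apbvt relies on __test_and_set_bit returning the previous bit value, so only the first user of a port issues the CQP add and repeat adds become no-ops. A single-threaded sketch of that idempotent-add idiom (the real code does this under apbvt_lock):

    #include <stdio.h>

    #define LONG_BITS (8 * sizeof(unsigned long))

    static unsigned long ports_in_use[65536 / (8 * sizeof(unsigned long))];

    /* Return the old bit and set it: only the first add per port
     * needs to do real work, like the __test_and_set_bit check above. */
    static int test_and_set_port(unsigned int port)
    {
        unsigned long *word = &ports_in_use[port / LONG_BITS];
        unsigned long bit = 1UL << (port % LONG_BITS);
        int was_set = !!(*word & bit);

        *word |= bit;
        return was_set;
    }

    int main(void)
    {
        printf("first add, in_use=%d\n", test_and_set_port(80));  /* 0 */
        printf("second add, in_use=%d\n", test_and_set_port(80)); /* 1 */
        return 0;
    }
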
-/**
- * i40iw_manage_arp_cache - manage hw arp cache
- * @iwdev: iwarp device
- * @mac_addr: mac address ptr
- * @ip_addr: ip addr for arp cache
- * @ipv4: flag indicating IPv4 when true
- * @action: add, delete or modify
- */
-void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
- unsigned char *mac_addr,
- u32 *ip_addr,
- bool ipv4,
- u32 action)
-{
- struct i40iw_add_arp_cache_entry_info *info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- int arp_index;
-
- arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
- if (arp_index < 0)
- return;
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- if (action == I40IW_ARP_ADD) {
- cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
- info = &cqp_info->in.u.add_arp_cache_entry.info;
- memset(info, 0, sizeof(*info));
- info->arp_index = cpu_to_le16((u16)arp_index);
- info->permanent = true;
- ether_addr_copy(info->mac_addr, mac_addr);
- cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
- } else {
- cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
- cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
- }
-
- cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->post_sq = 1;
- if (i40iw_handle_cqp_op(iwdev, cqp_request))
- i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
-}
-
-/**
- * i40iw_send_syn_cqp_callback - do syn/ack after qhash
- * @cqp_request: qhash cqp completion
- * @send_ack: flag send ack
- */
-static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
-{
- i40iw_send_syn(cqp_request->param, send_ack);
-}
-
-/**
- * i40iw_manage_qhash - add or modify qhash
- * @iwdev: iwarp device
- * @cminfo: cm info for qhash
- * @etype: type (syn or quad)
- * @mtype: type of qhash
- * @cmnode: cmnode associated with connection
- * @wait: wait for completion
- */
-enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
- struct i40iw_cm_info *cminfo,
- enum i40iw_quad_entry_type etype,
- enum i40iw_quad_hash_manage_type mtype,
- void *cmnode,
- bool wait)
-{
- struct i40iw_qhash_table_info *info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_sc_vsi *vsi = &iwdev->vsi;
- enum i40iw_status_code status;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, wait);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.manage_qhash_table_entry.info;
- memset(info, 0, sizeof(*info));
-
- info->vsi = &iwdev->vsi;
- info->manage = mtype;
- info->entry_type = etype;
- if (cminfo->vlan_id != 0xFFFF) {
- info->vlan_valid = true;
- info->vlan_id = cpu_to_le16(cminfo->vlan_id);
- } else {
- info->vlan_valid = false;
- }
-
- info->ipv4_valid = cminfo->ipv4;
- info->user_pri = cminfo->user_pri;
- ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
- info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
- info->dest_port = cpu_to_le16(cminfo->loc_port);
- info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
- info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
- info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
- info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
- if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
- info->src_port = cpu_to_le16(cminfo->rem_port);
- info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
- info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
- info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
- info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
- }
- if (cmnode) {
- cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
- cqp_request->param = (void *)cmnode;
- }
-
- if (info->ipv4_valid)
- i40iw_debug(dev, I40IW_DEBUG_CM,
- "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
- __func__, (!mtype) ? "DELETE" : "ADD",
- info->dest_ip,
- info->dest_port, info->mac_addr, cminfo->vlan_id);
- else
- i40iw_debug(dev, I40IW_DEBUG_CM,
- "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
- __func__, (!mtype) ? "DELETE" : "ADD",
- info->dest_ip,
- info->dest_port, info->mac_addr, cminfo->vlan_id);
- cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
- cqp_info->post_sq = 1;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
- return status;
-}
-
-/**
- * i40iw_hw_flush_wqes - flush qp's wqe
- * @iwdev: iwarp device
- * @qp: hardware control qp
- * @info: info for flush
- * @wait: flag wait for completion
- */
-enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
- struct i40iw_sc_qp *qp,
- struct i40iw_qp_flush_info *info,
- bool wait)
-{
- enum i40iw_status_code status;
- struct i40iw_qp_flush_info *hw_info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
- memcpy(hw_info, info, sizeof(*hw_info));
-
- cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_flush_wqes.qp = qp;
- cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status) {
- i40iw_pr_err("CQP-OP Flush WQE's fail");
- complete(&iwqp->sq_drained);
- complete(&iwqp->rq_drained);
- return status;
- }
- if (!cqp_request->compl_info.maj_err_code) {
- switch (cqp_request->compl_info.min_err_code) {
- case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
- complete(&iwqp->sq_drained);
- break;
- case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
- complete(&iwqp->rq_drained);
- break;
- case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
- break;
- default:
- complete(&iwqp->sq_drained);
- complete(&iwqp->rq_drained);
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * i40iw_gen_ae - generate AE
- * @iwdev: iwarp device
- * @qp: qp associated with AE
- * @info: info for ae
- * @wait: wait for completion
- */
-void i40iw_gen_ae(struct i40iw_device *iwdev,
- struct i40iw_sc_qp *qp,
- struct i40iw_gen_ae_info *info,
- bool wait)
-{
- struct i40iw_gen_ae_info *ae_info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- ae_info = &cqp_request->info.in.u.gen_ae.info;
- memcpy(ae_info, info, sizeof(*ae_info));
-
- cqp_info->cqp_cmd = OP_GEN_AE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.gen_ae.qp = qp;
- cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
- if (i40iw_handle_cqp_op(iwdev, cqp_request))
- i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n",
- info->ae_code);
-}
-
-/**
- * i40iw_hw_manage_vf_pble_bp - manage vf pbles
- * @iwdev: iwarp device
- * @info: info for managing pble
- * @wait: flag wait for completion
- */
-enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
- struct i40iw_manage_vf_pble_info *info,
- bool wait)
-{
- enum i40iw_status_code status;
- struct i40iw_manage_vf_pble_info *hw_info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- if ((iwdev->init_state < CCQ_CREATED) && wait)
- wait = false;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
- memcpy(hw_info, info, sizeof(*hw_info));
-
- cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
- cqp_info->post_sq = 1;
- cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
- cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
- return status;
-}
-
-/**
- * i40iw_get_ib_wc - convert an iwarp flush code to an IB work completion status
- * @opcode: iwarp flush code
- */
-static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
-{
- switch (opcode) {
- case FLUSH_PROT_ERR:
- return IB_WC_LOC_PROT_ERR;
- case FLUSH_REM_ACCESS_ERR:
- return IB_WC_REM_ACCESS_ERR;
- case FLUSH_LOC_QP_OP_ERR:
- return IB_WC_LOC_QP_OP_ERR;
- case FLUSH_REM_OP_ERR:
- return IB_WC_REM_OP_ERR;
- case FLUSH_LOC_LEN_ERR:
- return IB_WC_LOC_LEN_ERR;
- case FLUSH_GENERAL_ERR:
- return IB_WC_GENERAL_ERR;
- case FLUSH_FATAL_ERR:
- default:
- return IB_WC_FATAL_ERR;
- }
-}
-
-/**
- * i40iw_set_flush_info - set flush info
- * @pinfo: qp flush info to be filled in
- * @min: minor err
- * @maj: major err
- * @opcode: flush error code
- */
-static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
- u16 *min,
- u16 *maj,
- enum i40iw_flush_opcode opcode)
-{
- *min = (u16)i40iw_get_ib_wc(opcode);
- *maj = CQE_MAJOR_DRV;
- pinfo->userflushcode = true;
-}
-
-/**
- * i40iw_flush_wqes - flush wqe for qp
- * @iwdev: iwarp device
- * @iwqp: qp to flush wqes
- */
-void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
-{
- struct i40iw_qp_flush_info info;
- struct i40iw_qp_flush_info *pinfo = &info;
-
- struct i40iw_sc_qp *qp = &iwqp->sc_qp;
-
- memset(pinfo, 0, sizeof(*pinfo));
- info.sq = true;
- info.rq = true;
- if (qp->term_flags) {
- i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
- &pinfo->sq_major_code, qp->flush_code);
- i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
- &pinfo->rq_major_code, qp->flush_code);
- }
- (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
deleted file mode 100644
index 364f69cd620f..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ /dev/null
@@ -1,2064 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <net/addrconf.h>
-
-#include "i40iw.h"
-#include "i40iw_register.h"
-#include <net/netevent.h>
-#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
-#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
-#define CLIENT_IW_INTERFACE_VERSION_BUILD 00
-
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 123
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
-
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
-
-static int resource_profile;
-module_param(resource_profile, int, 0644);
-MODULE_PARM_DESC(resource_profile,
- "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
-
-static int max_rdma_vfs = 32;
-module_param(max_rdma_vfs, int, 0644);
-MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");
-static int mpa_version = 2;
-module_param(mpa_version, int, 0644);
-MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
-
-MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static struct i40e_client i40iw_client;
-static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
-
-static LIST_HEAD(i40iw_handlers);
-static DEFINE_SPINLOCK(i40iw_handler_lock);
-
-static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
- u32 vf_id, u8 *msg, u16 len);
-
-static struct notifier_block i40iw_inetaddr_notifier = {
- .notifier_call = i40iw_inetaddr_event
-};
-
-static struct notifier_block i40iw_inetaddr6_notifier = {
- .notifier_call = i40iw_inet6addr_event
-};
-
-static struct notifier_block i40iw_net_notifier = {
- .notifier_call = i40iw_net_event
-};
-
-static struct notifier_block i40iw_netdevice_notifier = {
- .notifier_call = i40iw_netdevice_event
-};
-
-/**
- * i40iw_find_i40e_handler - find a handler given a client info
- * @ldev: pointer to a client info
- */
-static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
-{
- struct i40iw_handler *hdl;
- unsigned long flags;
-
- spin_lock_irqsave(&i40iw_handler_lock, flags);
- list_for_each_entry(hdl, &i40iw_handlers, list) {
- if (hdl->ldev.netdev == ldev->netdev) {
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
- return hdl;
- }
- }
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
- return NULL;
-}
-
-/**
- * i40iw_find_netdev - find a handler given a netdev
- * @netdev: pointer to net_device
- */
-struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
-{
- struct i40iw_handler *hdl;
- unsigned long flags;
-
- spin_lock_irqsave(&i40iw_handler_lock, flags);
- list_for_each_entry(hdl, &i40iw_handlers, list) {
- if (hdl->ldev.netdev == netdev) {
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
- return hdl;
- }
- }
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
- return NULL;
-}
-
-/**
- * i40iw_add_handler - add a handler to the list
- * @hdl: handler to be added to the handler list
- */
-static void i40iw_add_handler(struct i40iw_handler *hdl)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i40iw_handler_lock, flags);
- list_add(&hdl->list, &i40iw_handlers);
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
-}
-
-/**
- * i40iw_del_handler - delete a handler from the list
- * @hdl: handler to be deleted from the handler list
- */
-static int i40iw_del_handler(struct i40iw_handler *hdl)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i40iw_handler_lock, flags);
- list_del(&hdl->list);
- spin_unlock_irqrestore(&i40iw_handler_lock, flags);
- return 0;
-}
-
-/**
- * i40iw_enable_intr - set up device interrupts
- * @dev: hardware control device structure
- * @msix_id: id of the interrupt to be enabled
- */
-static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
-{
- u32 val;
-
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- if (dev->is_pf)
- i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
- else
- i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
-}
-
-/**
- * i40iw_dpc - tasklet for aeq and ceq 0
- * @t: Tasklet context used to fetch pointer to iwarp device
- */
-static void i40iw_dpc(struct tasklet_struct *t)
-{
- struct i40iw_device *iwdev = from_tasklet(iwdev, t, dpc_tasklet);
-
- if (iwdev->msix_shared)
- i40iw_process_ceq(iwdev, iwdev->ceqlist);
- i40iw_process_aeq(iwdev);
- i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
-}
-
-/**
- * i40iw_ceq_dpc - dpc handler for CEQ
- * @t: Tasklet context used to fetch pointer to CEQ data
- */
-static void i40iw_ceq_dpc(struct tasklet_struct *t)
-{
- struct i40iw_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
- struct i40iw_device *iwdev = iwceq->iwdev;
-
- i40iw_process_ceq(iwdev, iwceq);
- i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
-}
-
-/**
- * i40iw_irq_handler - interrupt handler for aeq and ceq0
- * @irq: Interrupt request number
- * @data: iwarp device
- */
-static irqreturn_t i40iw_irq_handler(int irq, void *data)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)data;
-
- tasklet_schedule(&iwdev->dpc_tasklet);
- return IRQ_HANDLED;
-}
-
-/**
- * i40iw_destroy_cqp - destroy control qp
- * @iwdev: iwarp device
- * @free_hwcqp: true if the hardware CQP should be destroyed
- *
- * Issue destroy cqp request and
- * free the resources associated with the cqp
- */
-static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_cqp *cqp = &iwdev->cqp;
-
- if (free_hwcqp)
- dev->cqp_ops->cqp_destroy(dev->cqp);
-
- i40iw_cleanup_pending_cqp_op(iwdev);
-
- i40iw_free_dma_mem(dev->hw, &cqp->sq);
- kfree(cqp->scratch_array);
- iwdev->cqp.scratch_array = NULL;
-
- kfree(cqp->cqp_requests);
- cqp->cqp_requests = NULL;
-}
-
-/**
- * i40iw_disable_irq - disable device interrupts
- * @dev: hardware control device structure
- * @msix_vec: msix vector to disable irq
- * @dev_id: parameter to pass to free_irq (used during irq setup)
- *
- * The function is called when destroying aeq/ceq
- */
-static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
- struct i40iw_msix_vector *msix_vec,
- void *dev_id)
-{
- if (dev->is_pf)
- i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
- else
- i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
- irq_set_affinity_hint(msix_vec->irq, NULL);
- free_irq(msix_vec->irq, dev_id);
-}
-
-/**
- * i40iw_destroy_aeq - destroy aeq
- * @iwdev: iwarp device
- *
- * Issue a destroy aeq request and
- * free the resources associated with the aeq
- * The function is called during driver unload
- */
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
-{
- enum i40iw_status_code status = I40IW_ERR_NOT_READY;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_aeq *aeq = &iwdev->aeq;
-
- if (!iwdev->msix_shared)
- i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
- if (iwdev->reset)
- goto exit;
-
- if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
- status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
- if (status)
- i40iw_pr_err("destroy aeq failed %d\n", status);
-
-exit:
- i40iw_free_dma_mem(dev->hw, &aeq->mem);
-}
-
-/**
- * i40iw_destroy_ceq - destroy ceq
- * @iwdev: iwarp device
- * @iwceq: ceq to be destroyed
- *
- * Issue a destroy ceq request and
- * free the resources associated with the ceq
- */
-static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq)
-{
- enum i40iw_status_code status;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
-
- if (iwdev->reset)
- goto exit;
-
- status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
- if (status) {
- i40iw_pr_err("ceq destroy command failed %d\n", status);
- goto exit;
- }
-
- status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
- if (status)
- i40iw_pr_err("ceq destroy completion failed %d\n", status);
-exit:
- i40iw_free_dma_mem(dev->hw, &iwceq->mem);
-}
-
-/**
- * i40iw_dele_ceqs - destroy all ceqs
- * @iwdev: iwarp device
- *
- * Go through all of the device ceqs and for each ceq
- * disable the ceq interrupt and destroy the ceq
- */
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
-{
- u32 i = 0;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_ceq *iwceq = iwdev->ceqlist;
- struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
-
- if (iwdev->msix_shared) {
- i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
- i40iw_destroy_ceq(iwdev, iwceq);
- iwceq++;
- i++;
- }
-
- for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
- i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
- i40iw_destroy_ceq(iwdev, iwceq);
- }
-
- iwdev->sc_dev.ceq_valid = false;
-}
-
-/**
- * i40iw_destroy_ccq - destroy control cq
- * @iwdev: iwarp device
- *
- * Issue destroy ccq request and
- * free the resources associated with the ccq
- */
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_ccq *ccq = &iwdev->ccq;
- enum i40iw_status_code status = 0;
-
- if (!iwdev->reset)
- status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
- if (status)
- i40iw_pr_err("ccq destroy failed %d\n", status);
- i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
-}
-
-/* types of hmc objects */
-static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
- I40IW_HMC_IW_QP,
- I40IW_HMC_IW_CQ,
- I40IW_HMC_IW_HTE,
- I40IW_HMC_IW_ARP,
- I40IW_HMC_IW_APBVT_ENTRY,
- I40IW_HMC_IW_MR,
- I40IW_HMC_IW_XF,
- I40IW_HMC_IW_XFFL,
- I40IW_HMC_IW_Q1,
- I40IW_HMC_IW_Q1FL,
- I40IW_HMC_IW_TIMER,
-};
-
-/**
- * i40iw_close_hmc_objects_type - delete hmc objects of a given type
- * @dev: iwarp device
- * @obj_type: the hmc object type to be deleted
- * @hmc_info: pointer to the HMC configuration information
- * @is_pf: true if the function is PF otherwise false
- * @reset: true if called before reset
- */
-static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
- enum i40iw_hmc_rsrc_type obj_type,
- struct i40iw_hmc_info *hmc_info,
- bool is_pf,
- bool reset)
-{
- struct i40iw_hmc_del_obj_info info;
-
- memset(&info, 0, sizeof(info));
- info.hmc_info = hmc_info;
- info.rsrc_type = obj_type;
- info.count = hmc_info->hmc_obj[obj_type].cnt;
- info.is_pf = is_pf;
- if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
- i40iw_pr_err("del obj of type %d failed\n", obj_type);
-}
-
-/**
- * i40iw_del_hmc_objects - remove all device hmc objects
- * @dev: iwarp device
- * @hmc_info: hmc_info to free
- * @is_pf: true if hmc_info belongs to the PF, rather than to a VF or
- * to memory the PF allocated on behalf of a VF
- * @reset: true if called before reset
- */
-static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_info *hmc_info,
- bool is_pf,
- bool reset)
-{
- unsigned int i;
-
- for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
- i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
-}
-
-/**
- * i40iw_ceq_handler - interrupt handler for ceq
- * @irq: interrupt request number
- * @data: ceq pointer
- */
-static irqreturn_t i40iw_ceq_handler(int irq, void *data)
-{
- struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
-
- if (iwceq->irq != irq)
- i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
- tasklet_schedule(&iwceq->dpc_tasklet);
- return IRQ_HANDLED;
-}
-
-/**
- * i40iw_create_hmc_obj_type - create hmc object of a given type
- * @dev: hardware control device structure
- * @info: information for the hmc object to create
- */
-static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_create_obj_info *info)
-{
- return dev->hmc_ops->create_hmc_object(dev, info);
-}
-
-/**
- * i40iw_create_hmc_objs - create all hmc objects for the device
- * @iwdev: iwarp device
- * @is_pf: true if the function is PF otherwise false
- *
- * Create the device hmc objects and allocate hmc pages
- * Return 0 if successful, otherwise clean up and return error
- */
-static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
- bool is_pf)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_hmc_create_obj_info info;
- enum i40iw_status_code status;
- int i;
-
- memset(&info, 0, sizeof(info));
- info.hmc_info = dev->hmc_info;
- info.is_pf = is_pf;
- info.entry_type = iwdev->sd_type;
- for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
- info.rsrc_type = iw_hmc_obj_types[i];
- info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
- info.add_sd_cnt = 0;
- status = i40iw_create_hmc_obj_type(dev, &info);
- if (status) {
- i40iw_pr_err("create obj type %d status = %d\n",
- iw_hmc_obj_types[i], status);
- break;
- }
- }
- if (!status)
- return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
- dev->hmc_fn_id,
- true, true));
-
- while (i) {
- i--;
- /* destroy the hmc objects of a given type */
- i40iw_close_hmc_objects_type(dev,
- iw_hmc_obj_types[i],
- dev->hmc_info,
- is_pf,
- false);
- }
- return status;
-}
-
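The while (i) { i--; ... } tail of i40iw_create_hmc_objs is a reverse rollback: on failure at step i, it destroys only the objects already created, in the opposite order. A compact sketch of the idiom, with a stubbed failure at one step:

    #include <stdio.h>

    #define NOBJS 5

    static int create_obj(int i)
    {
        return i == 3 ? -1 : 0; /* stub: fail at step 3 */
    }

    static void destroy_obj(int i)
    {
        printf("rolling back object %d\n", i);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NOBJS; i++)
            if (create_obj(i))
                break;
        if (i == NOBJS)
            return 0;       /* everything created */
        while (i) {         /* undo 2, 1, 0 -- never the failed step */
            i--;
            destroy_obj(i);
        }
        return 1;
    }
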
-/**
- * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
- * @iwdev: iwarp device
- * @memptr: points to the memory addresses
- * @size: size of memory needed
- * @mask: mask for the aligned memory
- *
- * Get aligned memory of the requested size and
- * update the memptr to point to the new aligned memory
- * Return 0 if successful, otherwise return no memory error
- */
-enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
- struct i40iw_dma_mem *memptr,
- u32 size,
- u32 mask)
-{
- unsigned long va, newva;
- unsigned long extra;
-
- va = (unsigned long)iwdev->obj_next.va;
- newva = va;
- if (mask)
- newva = ALIGN(va, (mask + 1));
- extra = newva - va;
- memptr->va = (u8 *)va + extra;
- memptr->pa = iwdev->obj_next.pa + extra;
- memptr->size = size;
- if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
- return I40IW_ERR_NO_MEMORY;
-
- iwdev->obj_next.va = memptr->va + size;
- iwdev->obj_next.pa = memptr->pa + size;
- return 0;
-}
-
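i40iw_obj_aligned_mem rounds the running cursor up to mask + 1 alignment with ALIGN and accounts for the padding it skipped. A standalone sketch of that arithmetic (ALIGN_UP here mirrors the kernel's ALIGN):

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two);
     * mirrors the kernel's ALIGN(va, mask + 1) used above. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        uintptr_t va = 0x1004;              /* current carve-out cursor */
        uintptr_t mask = 0xFF;              /* request 256-byte alignment */
        uintptr_t newva = ALIGN_UP(va, mask + 1);
        uintptr_t extra = newva - va;       /* padding skipped: 0xFC */

        printf("newva=0x%lx extra=%lu\n",
               (unsigned long)newva, (unsigned long)extra);
        return 0;
    }
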
-/**
- * i40iw_create_cqp - create control qp
- * @iwdev: iwarp device
- *
- * Return 0, if the cqp and all the resources associated with it
- * are successfully created, otherwise return error
- */
-static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
-{
- enum i40iw_status_code status;
- u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
- struct i40iw_dma_mem mem;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_cqp_init_info cqp_init_info;
- struct i40iw_cqp *cqp = &iwdev->cqp;
- u16 maj_err, min_err;
- int i;
-
- cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
- if (!cqp->cqp_requests)
- return I40IW_ERR_NO_MEMORY;
- cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
- if (!cqp->scratch_array) {
- kfree(cqp->cqp_requests);
- return I40IW_ERR_NO_MEMORY;
- }
- dev->cqp = &cqp->sc_cqp;
- dev->cqp->dev = dev;
- memset(&cqp_init_info, 0, sizeof(cqp_init_info));
- status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
- (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
- I40IW_CQP_ALIGNMENT);
- if (status)
- goto exit;
- status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
- I40IW_HOST_CTX_ALIGNMENT_MASK);
- if (status)
- goto exit;
- dev->cqp->host_ctx_pa = mem.pa;
- dev->cqp->host_ctx = mem.va;
- /* populate the cqp init info */
- cqp_init_info.dev = dev;
- cqp_init_info.sq_size = sqsize;
- cqp_init_info.sq = cqp->sq.va;
- cqp_init_info.sq_pa = cqp->sq.pa;
- cqp_init_info.host_ctx_pa = mem.pa;
- cqp_init_info.host_ctx = mem.va;
- cqp_init_info.hmc_profile = iwdev->resource_profile;
- cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
- cqp_init_info.scratch_array = cqp->scratch_array;
- status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
- if (status) {
- i40iw_pr_err("cqp init status %d\n", status);
- goto exit;
- }
- status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
- if (status) {
- i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
- status, maj_err, min_err);
- goto exit;
- }
- spin_lock_init(&cqp->req_lock);
- INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
- INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
- /* init the waitq of the cqp_requests and add them to the list */
- for (i = 0; i < sqsize; i++) {
- init_waitqueue_head(&cqp->cqp_requests[i].waitq);
- list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
- }
- return 0;
-exit:
- /* clean up the created resources */
- i40iw_destroy_cqp(iwdev, false);
- return status;
-}
-
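
i40iw_create_cqp preallocates one cqp_request per SQ slot and threads them all onto cqp_avail_reqs, so issuing a control-queue command later never has to allocate on the hot path. A toy version of that preallocated free-list pattern (all names invented for the sketch):

    #include <stdio.h>
    #include <stdlib.h>

    struct req {              /* toy stand-in for a cqp_request */
        struct req *next;
        int id;
    };

    int main(void)
    {
        enum { POOL = 4 };
        struct req *pool = calloc(POOL, sizeof(*pool));
        struct req *avail = NULL;
        int i;

        if (!pool)
            return 1;
        for (i = 0; i < POOL; i++) {  /* mirror the init loop above */
            pool[i].id = i;
            pool[i].next = avail;
            avail = &pool[i];
        }
        while (avail) {               /* pop requests at "runtime" */
            printf("got request %d\n", avail->id);
            avail = avail->next;
        }
        free(pool);
        return 0;
    }
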
-/**
- * i40iw_create_ccq - create control cq
- * @iwdev: iwarp device
- *
- * Return 0, if the ccq and the resources associated with it
- * are successfully created, otherwise return error
- */
-static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_dma_mem mem;
- enum i40iw_status_code status;
- struct i40iw_ccq_init_info info;
- struct i40iw_ccq *ccq = &iwdev->ccq;
-
- memset(&info, 0, sizeof(info));
- dev->ccq = &ccq->sc_cq;
- dev->ccq->dev = dev;
- info.dev = dev;
- ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
- ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
- status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
- ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
- if (status)
- goto exit;
- status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
- I40IW_SHADOWAREA_MASK);
- if (status)
- goto exit;
- ccq->sc_cq.back_cq = (void *)ccq;
- /* populate the ccq init info */
- info.cq_base = ccq->mem_cq.va;
- info.cq_pa = ccq->mem_cq.pa;
- info.num_elem = IW_CCQ_SIZE;
- info.shadow_area = mem.va;
- info.shadow_area_pa = mem.pa;
- info.ceqe_mask = false;
- info.ceq_id_valid = true;
- info.shadow_read_threshold = 16;
- status = dev->ccq_ops->ccq_init(dev->ccq, &info);
- if (!status)
- status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
-exit:
- if (status)
- i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
- return status;
-}
-
-/**
- * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
- * @iwdev: iwarp device
- * @msix_vec: interrupt vector information
- * @iwceq: ceq associated with the vector
- * @ceq_id: the id number of the iwceq
- *
- * Allocate interrupt resources and enable irq handling
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq,
- u32 ceq_id,
- struct i40iw_msix_vector *msix_vec)
-{
- enum i40iw_status_code status;
-
- if (iwdev->msix_shared && !ceq_id) {
- tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
- status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
- } else {
- tasklet_setup(&iwceq->dpc_tasklet, i40iw_ceq_dpc);
- status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
- }
-
-	if (status) {
-		i40iw_pr_err("ceq irq config fail\n");
-		return I40IW_ERR_CONFIG;
-	}
-
-	cpumask_clear(&msix_vec->mask);
-	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
-	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
-
-	msix_vec->ceq_id = ceq_id;
-
- return 0;
-}
-
-/**
- * i40iw_create_ceq - create completion event queue
- * @iwdev: iwarp device
- * @iwceq: pointer to the ceq resources to be created
- * @ceq_id: the id number of the iwceq
- *
- * Return 0, if the ceq and the resources associated with it
- * are successfully created, otherwise return error
- */
-static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq,
- u32 ceq_id)
-{
- enum i40iw_status_code status;
- struct i40iw_ceq_init_info info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- u64 scratch;
-
- memset(&info, 0, sizeof(info));
- info.ceq_id = ceq_id;
- iwceq->iwdev = iwdev;
- iwceq->mem.size = sizeof(struct i40iw_ceqe) *
- iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
- status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
- I40IW_CEQ_ALIGNMENT);
- if (status)
- goto exit;
- info.ceqe_base = iwceq->mem.va;
- info.ceqe_pa = iwceq->mem.pa;
-
- info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
- iwceq->sc_ceq.ceq_id = ceq_id;
- info.dev = dev;
- scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
- status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
- if (!status)
- status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
-
-exit:
- if (status)
- i40iw_free_dma_mem(dev->hw, &iwceq->mem);
- return status;
-}
-
-void i40iw_request_reset(struct i40iw_device *iwdev)
-{
- struct i40e_info *ldev = iwdev->ldev;
-
- ldev->ops->request_reset(ldev, iwdev->client, 1);
-}
-
-/**
- * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
- * @iwdev: iwarp device
- * @ldev: i40e lan device
- *
- * Allocate a list for all device completion event queues
- * Create the ceq's and configure their msix interrupt vectors
- * Return 0, if at least one ceq is successfully set up, otherwise return error
- */
-static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
- struct i40e_info *ldev)
-{
- u32 i;
- u32 ceq_id;
- struct i40iw_ceq *iwceq;
- struct i40iw_msix_vector *msix_vec;
- enum i40iw_status_code status = 0;
- u32 num_ceqs;
-
- if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
- status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
- iwdev->iw_qvlist);
- if (status)
- goto exit;
- } else {
- status = I40IW_ERR_BAD_PTR;
- goto exit;
- }
-
- num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
- iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
- if (!iwdev->ceqlist) {
- status = I40IW_ERR_NO_MEMORY;
- goto exit;
- }
- i = (iwdev->msix_shared) ? 0 : 1;
- for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
- iwceq = &iwdev->ceqlist[ceq_id];
- status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
- if (status) {
- i40iw_pr_err("create ceq status = %d\n", status);
- break;
- }
-
- msix_vec = &iwdev->iw_msixtbl[i];
- iwceq->irq = msix_vec->irq;
- iwceq->msix_idx = msix_vec->idx;
- status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
- if (status) {
- i40iw_destroy_ceq(iwdev, iwceq);
- break;
- }
- i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
- iwdev->ceqs_count++;
- }
-exit:
- if (status && !iwdev->ceqs_count) {
- kfree(iwdev->ceqlist);
- iwdev->ceqlist = NULL;
- return status;
- } else {
- iwdev->sc_dev.ceq_valid = true;
- return 0;
- }
-}
-
-/**
- * i40iw_configure_aeq_vector - set up the msix vector for aeq
- * @iwdev: iwarp device
- *
- * Allocate interrupt resources and enable irq handling
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
-{
- struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
- u32 ret = 0;
-
- if (!iwdev->msix_shared) {
- tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
- ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
- }
- if (ret) {
- i40iw_pr_err("aeq irq config fail\n");
- return I40IW_ERR_CONFIG;
- }
-
- return 0;
-}
-
-/**
- * i40iw_create_aeq - create async event queue
- * @iwdev: iwarp device
- *
- * Return 0, if the aeq and the resources associated with it
- * are successfully created, otherwise return error
- */
-static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
-{
- enum i40iw_status_code status;
- struct i40iw_aeq_init_info info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_aeq *aeq = &iwdev->aeq;
- u64 scratch = 0;
- u32 aeq_size;
-
- aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
- iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
- memset(&info, 0, sizeof(info));
- aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
- status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
- I40IW_AEQ_ALIGNMENT);
- if (status)
- goto exit;
-
- info.aeqe_base = aeq->mem.va;
- info.aeq_elem_pa = aeq->mem.pa;
- info.elem_cnt = aeq_size;
- info.dev = dev;
- status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
- if (status)
- goto exit;
- status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
- if (!status)
- status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
-exit:
- if (status)
- i40iw_free_dma_mem(dev->hw, &aeq->mem);
- return status;
-}
-
-/**
- * i40iw_setup_aeq - set up the device aeq
- * @iwdev: iwarp device
- *
- * Create the aeq and configure its msix interrupt vector
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- enum i40iw_status_code status;
-
- status = i40iw_create_aeq(iwdev);
- if (status)
- return status;
-
- status = i40iw_configure_aeq_vector(iwdev);
- if (status) {
- i40iw_destroy_aeq(iwdev);
- return status;
- }
-
- if (!iwdev->msix_shared)
- i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
- return 0;
-}
-
-/**
- * i40iw_initialize_ilq - create iwarp local queue for cm
- * @iwdev: iwarp device
- *
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
-{
- struct i40iw_puda_rsrc_info info;
- enum i40iw_status_code status;
-
- memset(&info, 0, sizeof(info));
- info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
- info.cq_id = 1;
- info.qp_id = 0;
- info.count = 1;
- info.pd_id = 1;
- info.sq_size = 8192;
- info.rq_size = 8192;
- info.buf_size = 1024;
- info.tx_buf_cnt = 16384;
- info.receive = i40iw_receive_ilq;
- info.xmit_complete = i40iw_free_sqbuf;
- status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
- if (status)
- i40iw_pr_err("ilq create fail\n");
- return status;
-}
-
-/**
- * i40iw_initialize_ieq - create iwarp exception queue
- * @iwdev: iwarp device
- *
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
-{
- struct i40iw_puda_rsrc_info info;
- enum i40iw_status_code status;
-
- memset(&info, 0, sizeof(info));
- info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
- info.cq_id = 2;
- info.qp_id = iwdev->vsi.exception_lan_queue;
- info.count = 1;
- info.pd_id = 2;
- info.sq_size = 8192;
- info.rq_size = 8192;
- info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
- info.tx_buf_cnt = 4096;
- status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
- if (status)
- i40iw_pr_err("ieq create fail\n");
- return status;
-}
-
-/**
- * i40iw_reinitialize_ieq - destroy and re-create ieq
- * @dev: iwarp device
- */
-void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
- if (i40iw_initialize_ieq(iwdev)) {
- iwdev->reset = true;
- i40iw_request_reset(iwdev);
- }
-}
-
-/**
- * i40iw_hmc_setup - create hmc objects for the device
- * @iwdev: iwarp device
- *
- * Set up the device private memory space for the number and size of
- * the hmc objects and create the objects
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
-{
- enum i40iw_status_code status;
-
- iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
- status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
- if (status)
- goto exit;
- status = i40iw_create_hmc_objs(iwdev, true);
- if (status)
- goto exit;
- iwdev->init_state = HMC_OBJS_CREATED;
-exit:
- return status;
-}
-
-/**
- * i40iw_del_init_mem - deallocate memory resources
- * @iwdev: iwarp device
- */
-static void i40iw_del_init_mem(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
-
- i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
- kfree(dev->hmc_info->sd_table.sd_entry);
- dev->hmc_info->sd_table.sd_entry = NULL;
- kfree(iwdev->mem_resources);
- iwdev->mem_resources = NULL;
- kfree(iwdev->ceqlist);
- iwdev->ceqlist = NULL;
- kfree(iwdev->iw_msixtbl);
- iwdev->iw_msixtbl = NULL;
- kfree(iwdev->hmc_info_mem);
- iwdev->hmc_info_mem = NULL;
-}
-
-/**
- * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
- * @iwdev: iwarp device
- * @idx: the index of the mac ip address to delete
- */
-static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
-{
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status = 0;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request) {
- i40iw_pr_err("cqp_request memory failed\n");
- return;
- }
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
- cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
- cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
-}
-
-/**
- * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
- * @iwdev: iwarp device
- * @mac_addr: pointer to mac address
- * @idx: the index of the mac ip address to add
- */
-static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
- u8 *mac_addr,
- u8 idx)
-{
- struct i40iw_local_mac_ipaddr_entry_info *info;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status = 0;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request) {
- i40iw_pr_err("cqp_request memory failed\n");
- return I40IW_ERR_NO_MEMORY;
- }
-
- cqp_info = &cqp_request->info;
-
- cqp_info->post_sq = 1;
- info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
- ether_addr_copy(info->mac_addr, mac_addr);
- info->entry_idx = idx;
- cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
- cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
- cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
- return status;
-}
-
-/**
- * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
- * @iwdev: iwarp device
- * @mac_ip_tbl_idx: the index of the new mac ip address
- *
- * Allocate a mac ip address entry and update the mac_ip_tbl_idx
- * to hold the index of the newly created mac ip address
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
- u16 *mac_ip_tbl_idx)
-{
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status = 0;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request) {
- i40iw_pr_err("cqp_request memory failed\n");
- return I40IW_ERR_NO_MEMORY;
- }
-
- /* increment refcount, because we need the cqp request ret value */
- atomic_inc(&cqp_request->refcount);
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
- cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (!status)
- *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
- else
- i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
- /* decrement refcount and free the cqp request, if no longer used */
- i40iw_put_cqp_request(iwcqp, cqp_request);
- return status;
-}
-
-/**
- * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
- * @iwdev: iwarp device
- * @macaddr: pointer to mac address
- *
- * Allocate a mac ip address entry and add it to the hw table
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
- u8 *macaddr)
-{
- enum i40iw_status_code status;
-
- status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
- if (!status) {
- status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
- (u8)iwdev->mac_ip_table_idx);
- if (status)
- i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
- }
- return status;
-}
-
-/**
- * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
- * @iwdev: iwarp device
- */
-static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
-{
- struct net_device *ip_dev;
- struct inet6_dev *idev;
- struct inet6_ifaddr *ifp, *tmp;
- u32 local_ipaddr6[4];
-
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
- if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
- (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
- (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
- idev = __in6_dev_get(ip_dev);
- if (!idev) {
- i40iw_pr_err("ipv6 inet device not found\n");
- break;
- }
- list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
- i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
- rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
- i40iw_copy_ip_ntohl(local_ipaddr6,
- ifp->addr.in6_u.u6_addr32);
- i40iw_manage_arp_cache(iwdev,
- ip_dev->dev_addr,
- local_ipaddr6,
- false,
- I40IW_ARP_ADD);
- }
- }
- }
- rcu_read_unlock();
-}
-
-/**
- * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
- * @iwdev: iwarp device
- */
-static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
-{
- struct net_device *dev;
- struct in_device *idev;
- u32 ip_addr;
-
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
- (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
- (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) {
- const struct in_ifaddr *ifa;
-
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- continue;
- in_dev_for_each_ifa_rcu(ifa, idev) {
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
- "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
- rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
-
- ip_addr = ntohl(ifa->ifa_address);
- i40iw_manage_arp_cache(iwdev,
- dev->dev_addr,
- &ip_addr,
- true,
- I40IW_ARP_ADD);
- }
- }
- }
- rcu_read_unlock();
-}
-
-/**
- * i40iw_add_mac_ip - add mac and ip addresses
- * @iwdev: iwarp device
- *
- * Create and add a mac ip address entry to the hw table and
- * ipv4/ipv6 addresses to the arp cache
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
-{
- struct net_device *netdev = iwdev->netdev;
- enum i40iw_status_code status;
-
- status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
- if (status)
- return status;
- i40iw_add_ipv4_addr(iwdev);
- i40iw_add_ipv6_addr(iwdev);
- return 0;
-}
-
-/**
- * i40iw_wait_pe_ready - Check if firmware is ready
- * @hw: provides access to registers
- */
-static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
-{
- u32 statusfw;
- u32 statuscpu0;
- u32 statuscpu1;
- u32 statuscpu2;
- u32 retrycount = 0;
-
- do {
- statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
-		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
-		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
-		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS0 status[x%04X]\n", __LINE__, statuscpu0);
- statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
- i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
- __LINE__, statuscpu1);
- statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
- i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
- __LINE__, statuscpu2);
- if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
- break; /* SUCCESS */
- msleep(1000);
- retrycount++;
- } while (retrycount < 14);
- i40iw_wr32(hw, 0xb4040, 0x4C104C5);
-}
-
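
i40iw_wait_pe_ready is a bounded poll: read the firmware and CPU status registers, stop as soon as all three CPU values read 0x80, otherwise sleep one second and retry, giving up after 14 attempts. The same loop shape in plain C (read_status stands in for the register reads):

    #include <stdio.h>
    #include <time.h>

    /* pretend hardware that reports ready (0x80) on the third read */
    static unsigned int read_status(int attempt)
    {
        return attempt < 2 ? 0 : 0x80;
    }

    int main(void)
    {
        const struct timespec one_sec = { .tv_sec = 1 };
        unsigned int status = 0;
        int retry;

        for (retry = 0; retry < 14; retry++) {  /* same bound as above */
            status = read_status(retry);
            if (status == 0x80)
                break;                          /* ready */
            nanosleep(&one_sec, NULL);
        }
        printf("ready=%d after %d polls\n", status == 0x80, retry + 1);
        return 0;
    }
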
-/**
- * i40iw_initialize_dev - initialize device
- * @iwdev: iwarp device
- * @ldev: lan device information
- *
- * Allocate memory for the hmc objects and initialize iwdev
- * Return 0 if successful, otherwise clean up the resources
- * and return error
- */
-static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
- struct i40e_info *ldev)
-{
- enum i40iw_status_code status;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_device_init_info info;
- struct i40iw_vsi_init_info vsi_info;
- struct i40iw_dma_mem mem;
- struct i40iw_l2params l2params;
- u32 size;
- struct i40iw_vsi_stats_info stats_info;
- u16 last_qset = I40IW_NO_QSET;
- u16 qset;
- u32 i;
-
- memset(&l2params, 0, sizeof(l2params));
- memset(&info, 0, sizeof(info));
- size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
- (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
- iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
- if (!iwdev->hmc_info_mem)
- return I40IW_ERR_NO_MEMORY;
-
- iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
- dev->hmc_info = &iwdev->hw.hmc;
- dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
- status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
- if (status)
- goto error;
- info.fpm_query_buf_pa = mem.pa;
- info.fpm_query_buf = mem.va;
- status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
- if (status)
- goto error;
- info.fpm_commit_buf_pa = mem.pa;
- info.fpm_commit_buf = mem.va;
- info.hmc_fn_id = ldev->fid;
-	info.is_pf = !ldev->ftype;
- info.bar0 = ldev->hw_addr;
- info.hw = &iwdev->hw;
- info.debug_mask = debug;
- l2params.mtu =
- (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
- for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
- qset = ldev->params.qos.prio_qos[i].qs_handle;
- l2params.qs_handle_list[i] = qset;
- if (last_qset == I40IW_NO_QSET)
- last_qset = qset;
- else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
- iwdev->dcb = true;
- }
- i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
- info.vchnl_send = i40iw_virtchnl_send;
-	status = i40iw_device_init(&iwdev->sc_dev, &info);
-	if (status)
- goto error;
- memset(&vsi_info, 0, sizeof(vsi_info));
- vsi_info.dev = &iwdev->sc_dev;
- vsi_info.back_vsi = (void *)iwdev;
- vsi_info.params = &l2params;
- vsi_info.exception_lan_queue = 1;
- i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
-
- if (dev->is_pf) {
- memset(&stats_info, 0, sizeof(stats_info));
- stats_info.fcn_id = ldev->fid;
- stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
- if (!stats_info.pestat) {
- status = I40IW_ERR_NO_MEMORY;
- goto error;
- }
-		stats_info.stats_initialize = true;
-		i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
- }
- return status;
-error:
- kfree(iwdev->hmc_info_mem);
- iwdev->hmc_info_mem = NULL;
- return status;
-}
-
-/**
- * i40iw_register_notifiers - register tcp ip notifiers
- */
-static void i40iw_register_notifiers(void)
-{
- register_inetaddr_notifier(&i40iw_inetaddr_notifier);
- register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- register_netevent_notifier(&i40iw_net_notifier);
- register_netdevice_notifier(&i40iw_netdevice_notifier);
-}
-
-/**
- * i40iw_unregister_notifiers - unregister tcp ip notifiers
- */
-static void i40iw_unregister_notifiers(void)
-{
- unregister_netevent_notifier(&i40iw_net_notifier);
- unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
- unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- unregister_netdevice_notifier(&i40iw_netdevice_notifier);
-}
-
-/**
- * i40iw_save_msix_info - copy msix vector information to iwarp device
- * @iwdev: iwarp device
- * @ldev: lan device information
- *
- * Allocate iwdev msix table and copy the ldev msix info to the table
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
- struct i40e_info *ldev)
-{
- struct i40e_qvlist_info *iw_qvlist;
- struct i40e_qv_info *iw_qvinfo;
- u32 ceq_idx;
- u32 i;
- size_t size;
-
- if (!ldev->msix_count) {
- i40iw_pr_err("No MSI-X vectors\n");
- return I40IW_ERR_CONFIG;
- }
-
- iwdev->msix_count = ldev->msix_count;
-
- size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
- size += struct_size(iw_qvlist, qv_info, iwdev->msix_count);
- iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
-
- if (!iwdev->iw_msixtbl)
- return I40IW_ERR_NO_MEMORY;
- iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
- iw_qvlist = iwdev->iw_qvlist;
- iw_qvinfo = iw_qvlist->qv_info;
- iw_qvlist->num_vectors = iwdev->msix_count;
- if (iwdev->msix_count <= num_online_cpus())
- iwdev->msix_shared = true;
- for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
- iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
- iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
- iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
- if (i == 0) {
- iw_qvinfo->aeq_idx = 0;
- if (iwdev->msix_shared)
- iw_qvinfo->ceq_idx = ceq_idx++;
- else
- iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
- } else {
- iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
- iw_qvinfo->ceq_idx = ceq_idx++;
- }
- iw_qvinfo->itr_idx = 3;
- iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
- }
- return 0;
-}
-
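
The msix table and the qvlist share a single allocation: its size is the vector array plus struct_size(iw_qvlist, qv_info, n), the kernel helper that computes a struct header plus n trailing flexible-array elements with overflow checking. A userspace sketch of the flexible-array half (field names only mimic the driver's):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct qv_info { uint32_t v_idx, ceq_idx, aeq_idx, itr_idx; };
    struct qvlist  { uint32_t num_vectors; struct qv_info qv_info[]; };

    int main(void)
    {
        uint32_t n = 8;
        /* userspace stand-in for struct_size(list, qv_info, n) */
        size_t size = sizeof(struct qvlist) + n * sizeof(struct qv_info);
        struct qvlist *list = calloc(1, size);

        if (!list)
            return 1;
        list->num_vectors = n;
        printf("%zu bytes hold the header plus %u entries\n", size, n);
        free(list);
        return 0;
    }
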
-/**
- * i40iw_deinit_device - clean up the device resources
- * @iwdev: iwarp device
- *
- * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
- * destroy the device queues and free the pble and the hmc objects
- */
-static void i40iw_deinit_device(struct i40iw_device *iwdev)
-{
- struct i40e_info *ldev = iwdev->ldev;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
-
- i40iw_pr_info("state = %d\n", iwdev->init_state);
- if (iwdev->param_wq)
- destroy_workqueue(iwdev->param_wq);
-
- switch (iwdev->init_state) {
- case RDMA_DEV_REGISTERED:
- iwdev->iw_status = 0;
- i40iw_port_ibevent(iwdev);
- i40iw_destroy_rdma_device(iwdev->iwibdev);
- fallthrough;
- case IP_ADDR_REGISTERED:
- if (!iwdev->reset)
- i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
- fallthrough;
- case PBLE_CHUNK_MEM:
- i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
- fallthrough;
- case CEQ_CREATED:
- i40iw_dele_ceqs(iwdev);
- fallthrough;
- case AEQ_CREATED:
- i40iw_destroy_aeq(iwdev);
- fallthrough;
- case IEQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
- fallthrough;
- case ILQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
- fallthrough;
- case CCQ_CREATED:
- i40iw_destroy_ccq(iwdev);
- fallthrough;
- case HMC_OBJS_CREATED:
- i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
- fallthrough;
- case CQP_CREATED:
- i40iw_destroy_cqp(iwdev, true);
- fallthrough;
- case INITIAL_STATE:
- i40iw_cleanup_cm_core(&iwdev->cm_core);
- if (iwdev->vsi.pestat) {
- i40iw_vsi_stats_free(&iwdev->vsi);
- kfree(iwdev->vsi.pestat);
- }
- i40iw_del_init_mem(iwdev);
- break;
- case INVALID_STATE:
- default:
- i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
- break;
- }
-
- i40iw_del_handler(i40iw_find_i40e_handler(ldev));
- kfree(iwdev->hdl);
-}
-
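
i40iw_deinit_device is a fallthrough teardown ladder: it switches on the furthest init_state the open path reached and deliberately falls through each case, so every earlier stage is undone in reverse order of creation. Reduced to a skeleton (states and undo_* helpers are invented):

    #include <stdio.h>

    enum state { S_NONE, S_A, S_B, S_C };

    static void undo_c(void) { puts("undo C"); }
    static void undo_b(void) { puts("undo B"); }
    static void undo_a(void) { puts("undo A"); }

    static void teardown(enum state reached)
    {
        switch (reached) {
        case S_C:
            undo_c();
            /* fall through */
        case S_B:
            undo_b();
            /* fall through */
        case S_A:
            undo_a();
            break;
        case S_NONE:
        default:
            break;
        }
    }

    int main(void)
    {
        teardown(S_B);  /* init got as far as B: prints undo B, undo A */
        return 0;
    }
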
-/**
- * i40iw_setup_init_state - set up the initial device struct
- * @hdl: handler for iwarp device - one per instance
- * @ldev: lan device information
- * @client: iwarp client information, provided during registration
- *
- * Initialize the iwarp device and its hdl information
- * using the ldev and client information
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
- struct i40e_info *ldev,
- struct i40e_client *client)
-{
- struct i40iw_device *iwdev = &hdl->device;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- enum i40iw_status_code status;
-
- memcpy(&hdl->ldev, ldev, sizeof(*ldev));
-
- iwdev->mpa_version = mpa_version;
- iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
- (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
- I40IW_HMC_PROFILE_DEFAULT;
- iwdev->max_rdma_vfs =
- (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
- iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
- iwdev->netdev = ldev->netdev;
- hdl->client = client;
- if (!ldev->ftype)
- iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
- else
- iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;
-
- status = i40iw_save_msix_info(iwdev, ldev);
- if (status)
- return status;
- iwdev->hw.pcidev = ldev->pcidev;
- iwdev->hw.hw_addr = ldev->hw_addr;
- status = i40iw_allocate_dma_mem(&iwdev->hw,
- &iwdev->obj_mem, 8192, 4096);
- if (status)
- goto exit;
- iwdev->obj_next = iwdev->obj_mem;
-
- init_waitqueue_head(&iwdev->vchnl_waitq);
- init_waitqueue_head(&dev->vf_reqs);
- init_waitqueue_head(&iwdev->close_wq);
-
- status = i40iw_initialize_dev(iwdev, ldev);
-exit:
- if (status) {
- kfree(iwdev->iw_msixtbl);
- i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
- iwdev->iw_msixtbl = NULL;
- }
- return status;
-}
-
-/**
- * i40iw_get_used_rsrc - determine resources used internally
- * @iwdev: iwarp device
- *
- * Called after internal allocations
- */
-static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
-{
- iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
- iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
- iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
- iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
-}
-
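
find_next_zero_bit(map, max, 0) returns the index of the first clear bit, so it equals the number of used entries only while IDs have been handed out contiguously from 0, which is the case right after the internal allocations this function follows. A tiny model of that lookup (first_zero stands in for the kernel helper):

    #include <stdio.h>

    /* index of first clear bit, or nbits if none; models find_next_zero_bit */
    static unsigned int first_zero(unsigned long map, unsigned int nbits)
    {
        unsigned int i;

        for (i = 0; i < nbits; i++)
            if (!(map & (1UL << i)))
                return i;
        return nbits;
    }

    int main(void)
    {
        /* bits 0..2 set: three entries allocated from the bottom */
        printf("used = %u\n", first_zero(0x7, 32)); /* prints used = 3 */
        return 0;
    }
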
-/**
- * i40iw_open - client interface operation open for iwarp/uda device
- * @ldev: lan device information
- * @client: iwarp client information, provided during registration
- *
- * Called by the lan driver during the processing of client register
- * Create device resources, set up queues, pble and hmc objects and
- * register the device with the ib verbs interface
- * Return 0 if successful, otherwise return error
- */
-static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
-{
- struct i40iw_device *iwdev;
- struct i40iw_sc_dev *dev;
- enum i40iw_status_code status;
- struct i40iw_handler *hdl;
-
- hdl = i40iw_find_netdev(ldev->netdev);
- if (hdl)
- return 0;
-
- hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
- if (!hdl)
- return -ENOMEM;
- iwdev = &hdl->device;
- iwdev->hdl = hdl;
- dev = &iwdev->sc_dev;
- if (i40iw_setup_cm_core(iwdev)) {
- kfree(iwdev->hdl);
- return -ENOMEM;
- }
-
- dev->back_dev = (void *)iwdev;
- iwdev->ldev = &hdl->ldev;
- iwdev->client = client;
- mutex_init(&iwdev->pbl_mutex);
- i40iw_add_handler(hdl);
-
- do {
- status = i40iw_setup_init_state(hdl, ldev, client);
- if (status)
- break;
- iwdev->init_state = INITIAL_STATE;
- if (dev->is_pf)
- i40iw_wait_pe_ready(dev->hw);
- status = i40iw_create_cqp(iwdev);
- if (status)
- break;
- iwdev->init_state = CQP_CREATED;
- status = i40iw_hmc_setup(iwdev);
- if (status)
- break;
- status = i40iw_create_ccq(iwdev);
- if (status)
- break;
- iwdev->init_state = CCQ_CREATED;
- status = i40iw_initialize_ilq(iwdev);
- if (status)
- break;
- iwdev->init_state = ILQ_CREATED;
- status = i40iw_initialize_ieq(iwdev);
- if (status)
- break;
- iwdev->init_state = IEQ_CREATED;
- status = i40iw_setup_aeq(iwdev);
- if (status)
- break;
- iwdev->init_state = AEQ_CREATED;
- status = i40iw_setup_ceqs(iwdev, ldev);
- if (status)
- break;
-
- status = i40iw_get_rdma_features(dev);
- if (status)
- dev->feature_info[I40IW_FEATURE_FW_INFO] =
- I40IW_FW_VER_DEFAULT;
-
- iwdev->init_state = CEQ_CREATED;
- status = i40iw_initialize_hw_resources(iwdev);
- if (status)
- break;
- i40iw_get_used_rsrc(iwdev);
- dev->ccq_ops->ccq_arm(dev->ccq);
- status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
- if (status)
- break;
- iwdev->init_state = PBLE_CHUNK_MEM;
- iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
- status = i40iw_add_mac_ip(iwdev);
- if (status)
- break;
- iwdev->init_state = IP_ADDR_REGISTERED;
- if (i40iw_register_rdma_device(iwdev)) {
- i40iw_pr_err("register rdma device fail\n");
- break;
-		}
-
- iwdev->init_state = RDMA_DEV_REGISTERED;
- iwdev->iw_status = 1;
- i40iw_port_ibevent(iwdev);
- iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
-		if (!iwdev->param_wq)
- break;
- i40iw_pr_info("i40iw_open completed\n");
- return 0;
- } while (0);
-
- i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev);
- return -ERESTART;
-}
-
-/**
- * i40iw_l2params_worker - worker for l2 params change
- * @work: work pointer for l2 params
- */
-static void i40iw_l2params_worker(struct work_struct *work)
-{
- struct l2params_work *dwork =
- container_of(work, struct l2params_work, work);
- struct i40iw_device *iwdev = dwork->iwdev;
-
- i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
- atomic_dec(&iwdev->params_busy);
- kfree(work);
-}
-
-/**
- * i40iw_l2param_change - handle qs handles for qos and mss change
- * @ldev: lan device information
- * @client: client for parameter change
- * @params: new parameters from L2
- */
-static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
- struct i40e_params *params)
-{
- struct i40iw_handler *hdl;
- struct i40iw_l2params *l2params;
- struct l2params_work *work;
- struct i40iw_device *iwdev;
- int i;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return;
-
- iwdev = &hdl->device;
-
- if (atomic_read(&iwdev->params_busy))
- return;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return;
-
- atomic_inc(&iwdev->params_busy);
-
- work->iwdev = iwdev;
- l2params = &work->l2params;
- for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
- l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
-
- l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
-
- INIT_WORK(&work->work, i40iw_l2params_worker);
- queue_work(iwdev->param_wq, &work->work);
-}
-
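
params_busy gates out further L2 param events while one is still queued. The atomic_read followed by atomic_inc above leaves a small window in which two callers can both see the counter as zero; a compare-exchange makes the claim atomic, as in this hedged C11 sketch (not the driver's code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int busy;

    /* returns true only for the one caller that flips busy from 0 to 1 */
    static bool try_claim(void)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(&busy, &expected, 1);
    }

    int main(void)
    {
        printf("first caller wins: %d\n", try_claim());   /* 1 */
        printf("second is dropped: %d\n", try_claim());   /* 0 */
        atomic_store(&busy, 0);  /* the worker reopens the gate when done */
        return 0;
    }
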
-/**
- * i40iw_close - client interface operation close for iwarp/uda device
- * @ldev: lan device information
- * @reset: true if called before reset
- * @client: client to close
- *
- * Called by the lan driver during the processing of client unregister
- * Destroy and clean up the driver resources
- */
-static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
-{
- struct i40iw_device *iwdev;
- struct i40iw_handler *hdl;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return;
-
- iwdev = &hdl->device;
- iwdev->closing = true;
-
- if (reset)
- iwdev->reset = true;
-
- i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
- destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev);
-}
-
-/**
- * i40iw_vf_reset - process VF reset
- * @ldev: lan device information
- * @client: client interface instance
- * @vf_id: virtual function id
- *
- * Called when a VF is reset by the PF
- * Destroy and clean up the VF resources
- */
-static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
-{
- struct i40iw_handler *hdl;
- struct i40iw_sc_dev *dev;
- struct i40iw_hmc_fcn_info hmc_fcn_info;
- struct i40iw_virt_mem vf_dev_mem;
- struct i40iw_vfdev *tmp_vfdev;
- unsigned int i;
- unsigned long flags;
- struct i40iw_device *iwdev;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return;
-
- dev = &hdl->device.sc_dev;
- iwdev = (struct i40iw_device *)dev->back_dev;
-
- for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
- if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
- continue;
- /* free all resources allocated on behalf of vf */
- tmp_vfdev = dev->vf_dev[i];
- spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
- dev->vf_dev[i] = NULL;
- spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
- i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
- /* remove vf hmc function */
- memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
- hmc_fcn_info.vf_id = vf_id;
- hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
- hmc_fcn_info.free_fcn = true;
- i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
- /* free vf_dev */
- vf_dev_mem.va = tmp_vfdev;
- vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
- sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
- i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
- break;
- }
-}
-
-/**
- * i40iw_vf_enable - enable a number of VFs
- * @ldev: lan device information
- * @client: client interface instance
- * @num_vfs: number of VFs for the PF
- *
- * Called when the number of VFs changes
- */
-static void i40iw_vf_enable(struct i40e_info *ldev,
- struct i40e_client *client,
- u32 num_vfs)
-{
- struct i40iw_handler *hdl;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return;
-
- if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
- hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
- else
- hdl->device.max_enabled_vfs = num_vfs;
-}
-
-/**
- * i40iw_vf_capable - check if VF capable
- * @ldev: lan device information
- * @client: client interface instance
- * @vf_id: virtual function id
- *
- * Return 1 if a VF slot is available or if VF is already RDMA enabled
- * Return 0 otherwise
- */
-static int i40iw_vf_capable(struct i40e_info *ldev,
- struct i40e_client *client,
- u32 vf_id)
-{
- struct i40iw_handler *hdl;
- struct i40iw_sc_dev *dev;
- unsigned int i;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return 0;
-
- dev = &hdl->device.sc_dev;
-
- for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
- if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
- return 1;
- }
-
- return 0;
-}
-
-/**
- * i40iw_virtchnl_receive - receive a message through the virtual channel
- * @ldev: lan device information
- * @client: client interface instance
- * @vf_id: virtual function id associated with the message
- * @msg: message buffer pointer
- * @len: length of the message
- *
- * Invoke virtual channel receive operation for the given msg
- * Return 0 if successful, otherwise return error
- */
-static int i40iw_virtchnl_receive(struct i40e_info *ldev,
- struct i40e_client *client,
- u32 vf_id,
- u8 *msg,
- u16 len)
-{
- struct i40iw_handler *hdl;
- struct i40iw_sc_dev *dev;
- struct i40iw_device *iwdev;
- int ret_code = I40IW_NOT_SUPPORTED;
-
- if (!len || !msg)
- return I40IW_ERR_PARAM;
-
- hdl = i40iw_find_i40e_handler(ldev);
- if (!hdl)
- return I40IW_ERR_PARAM;
-
- dev = &hdl->device.sc_dev;
- iwdev = dev->back_dev;
-
- if (dev->vchnl_if.vchnl_recv) {
- ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
- if (!dev->is_pf) {
- atomic_dec(&iwdev->vchnl_msgs);
- wake_up(&iwdev->vchnl_waitq);
- }
- }
- return ret_code;
-}
-
-/**
- * i40iw_vf_clear_to_send - wait to send virtual channel message
- * @dev: iwarp device
- *
- * Wait until the virtual channel is clear
- * before sending the next message
- *
- * Returns false if error
- * Returns true if clear to send
- */
-bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
-{
- struct i40iw_device *iwdev;
- wait_queue_entry_t wait;
-
- iwdev = dev->back_dev;
-
- if (!wq_has_sleeper(&dev->vf_reqs) &&
- (atomic_read(&iwdev->vchnl_msgs) == 0))
- return true; /* virtual channel is clear */
-
- init_wait(&wait);
- add_wait_queue_exclusive(&dev->vf_reqs, &wait);
-
- if (!wait_event_timeout(dev->vf_reqs,
- (atomic_read(&iwdev->vchnl_msgs) == 0),
- I40IW_VCHNL_EVENT_TIMEOUT))
- dev->vchnl_up = false;
-
- remove_wait_queue(&dev->vf_reqs, &wait);
-
- return dev->vchnl_up;
-}
-
-/**
- * i40iw_virtchnl_send - send a message through the virtual channel
- * @dev: iwarp device
- * @vf_id: virtual function id associated with the message
- * @msg: virtual channel message buffer pointer
- * @len: length of the message
- *
- * Invoke virtual channel send operation for the given msg
- * Return 0 if successful, otherwise return error
- */
-static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
- u32 vf_id,
- u8 *msg,
- u16 len)
-{
- struct i40iw_device *iwdev;
- struct i40e_info *ldev;
-
- if (!dev || !dev->back_dev)
- return I40IW_ERR_BAD_PTR;
-
- iwdev = dev->back_dev;
- ldev = iwdev->ldev;
-
- if (ldev && ldev->ops && ldev->ops->virtchnl_send)
- return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
- return I40IW_ERR_BAD_PTR;
-}
-
-/* client interface functions */
-static const struct i40e_client_ops i40e_ops = {
- .open = i40iw_open,
- .close = i40iw_close,
- .l2_param_change = i40iw_l2param_change,
- .virtchnl_receive = i40iw_virtchnl_receive,
- .vf_reset = i40iw_vf_reset,
- .vf_enable = i40iw_vf_enable,
- .vf_capable = i40iw_vf_capable
-};
-
-/**
- * i40iw_init_module - driver initialization function
- *
- * First function to call when the driver is loaded
- * Register the driver as i40e client and port mapper client
- */
-static int __init i40iw_init_module(void)
-{
- int ret;
-
- memset(&i40iw_client, 0, sizeof(i40iw_client));
- i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
- i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
- i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
- i40iw_client.ops = &i40e_ops;
- memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
- i40iw_client.type = I40E_CLIENT_IWARP;
- ret = i40e_register_client(&i40iw_client);
- i40iw_register_notifiers();
-
- return ret;
-}
-
-/**
- * i40iw_exit_module - driver exit clean up function
- *
- * The function is called just before the driver is unloaded
- * Unregister the driver as i40e client and port mapper client
- */
-static void __exit i40iw_exit_module(void)
-{
- i40iw_unregister_notifiers();
- i40e_unregister_client(&i40iw_client);
-}
-
-module_init(i40iw_init_module);
-module_exit(i40iw_exit_module);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
deleted file mode 100644
index d938ccb195b1..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_OSDEP_H
-#define I40IW_OSDEP_H
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <net/tcp.h>
-#include <crypto/hash.h>
-/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#define STATS_TIMER_DELAY 1000
-
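-/**
- * set_64bit_val - write 64 bit value to wqe
- * @wqe_words: wqe addr
- * @byte_index: index to write to
- * @value: value to write
- */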
-static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
-{
- wqe_words[byte_index >> 3] = value;
-}
-
-/**
- * get_64bit_val - read 64 bit value from wqe
- * @wqe_words: wqe addr
- * @byte_index: index to read from
- * @value: read value
- */
-static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
-{
- *value = wqe_words[byte_index >> 3];
-}
-
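
Both helpers treat the WQE as an array of u64 words while callers pass byte offsets, so byte_index >> 3 converts a byte offset into a word index; offsets are expected to be 8-byte aligned. For example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t wqe[4] = { 0 };
        uint32_t byte_index = 24;              /* a multiple of 8 */

        wqe[byte_index >> 3] = 0xdeadbeefULL;  /* lands in word 3 */
        printf("word %u = %#llx\n", byte_index >> 3,
               (unsigned long long)wqe[byte_index >> 3]);
        return 0;
    }
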
-struct i40iw_dma_mem {
- void *va;
- dma_addr_t pa;
- u32 size;
-} __packed;
-
-struct i40iw_virt_mem {
- void *va;
- u32 size;
-} __packed;
-
-#define i40iw_debug(h, m, s, ...) \
-do { \
- if (((m) & (h)->debug_mask)) \
- pr_info("i40iw " s, ##__VA_ARGS__); \
-} while (0)
-
-#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
-
-#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \
- /* _i=0...31 */
-#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31
-#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \
- << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16
-#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
-#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20
-#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
-#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
-#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \
- << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
-#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31
-#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
-
-#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \
- /* _i=0...31 */
-#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31
-#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \
- << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
-
-#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \
- /* _i=0...31 */
-#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31
-#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \
- << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
-#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \
- << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \
- << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \
- << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
-
-#define I40E_GLPE_FWLDSTATUS 0x0000D200
-#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
-#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \
- << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
-#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1
-#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
-#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2
-#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \
- << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
-#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3
-#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \
- << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
-#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4
-#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \
- << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
-
-struct i40iw_sc_dev;
-struct i40iw_sc_qp;
-struct i40iw_puda_buf;
-struct i40iw_puda_completion_info;
-struct i40iw_update_sds_info;
-struct i40iw_hmc_fcn_info;
-struct i40iw_virtchnl_work_info;
-struct i40iw_manage_vf_pble_info;
-struct i40iw_device;
-struct i40iw_hmc_info;
-struct i40iw_hw;
-
-u8 __iomem *i40iw_get_hw_addr(void *dev);
-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
-enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
-bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev);
-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
- u32 length, u32 value);
-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
-void i40iw_free_hash_desc(struct shash_desc *);
-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
- struct i40iw_puda_buf *buf);
-enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_update_sds_info *info);
-enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_fcn_info *hmcfcninfo);
-enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *values_mem,
- u8 hmc_fn_id);
-enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *values_mem,
- u8 hmc_fn_id);
-enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *mem);
-enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
- struct i40iw_manage_vf_pble_info *info);
-void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
-void *i40iw_remove_head(struct list_head *list);
-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-
-void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
-void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
-void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
-void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
-
-enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
- struct i40iw_manage_vf_pble_info *info,
- bool wait);
-struct i40iw_sc_vsi;
-void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
-void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
-#define i40iw_mmiowb() do { } while (0)
-void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
-u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
-#endif /* I40IW_OSDEP_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
deleted file mode 100644
index 4c429567bbb4..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_P_H
-#define I40IW_P_H
-
-#define PAUSE_TIMER_VALUE 0xFFFF
-#define REFRESH_THRESHOLD 0x7FFF
-#define HIGH_THRESHOLD 0x800
-#define LOW_THRESHOLD 0x200
-#define ALL_TC2PFC 0xFF
-#define CQP_COMPL_WAIT_TIME 0x3E8
-#define CQP_TIMEOUT_THRESHOLD 5
-
-void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
- char *desc, u64 *buf, u32 size);
-/* init operations */
-enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
- struct i40iw_device_init_info *info);
-
-void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
-
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
-
-void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);
-
-enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
- struct i40iw_fast_reg_stag_info *info,
- bool post_sq);
-
-void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
-
-/* HMC/FPM functions */
-enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
- u8 hmc_fn_id);
-
-enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
- u32 *vf_cnt_array);
-
-/* stats functions */
-void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
-void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
-void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
- enum i40iw_hw_stats_index_32b index,
- u64 *value);
-void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
- enum i40iw_hw_stats_index_64b index,
- u64 *value);
-void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
-
-/* vsi misc functions */
-enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
-void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
-void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
-
-void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
-void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
-void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
-void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
-
-void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
-
-void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
-
-enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
- struct i40iw_sc_qp *qp, u64 scratch);
-
-enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
- struct i40iw_sc_qp *qp, u64 scratch);
-
-enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
- u64 scratch, u8 hmc_fn_id,
- bool post_sq,
- bool poll_registers);
-
-enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
-enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev);
-
-void free_sd_mem(struct i40iw_sc_dev *dev);
-
-enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
- struct cqp_commands_info *pcmdinfo);
-
-enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
-
-/* prototype for functions used for dynamic memory allocation */
-enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
- struct i40iw_dma_mem *mem, u64 size,
- u32 alignment);
-void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
-enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
- struct i40iw_virt_mem *mem, u32 size);
-enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
- struct i40iw_virt_mem *mem);
-u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
-void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
-
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
deleted file mode 100644
index 146a4148219b..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ /dev/null
@@ -1,611 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_status.h"
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_hmc.h"
-
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-
-#include <linux/pci.h>
-#include <linux/genalloc.h>
-#include <linux/vmalloc.h>
-#include "i40iw_pble.h"
-#include "i40iw.h"
-
-struct i40iw_device;
-static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc);
-static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
-
-/**
- * i40iw_destroy_pble_pool - destroy pool during module unload
- * @dev: i40iw_sc_dev struct
- * @pble_rsrc: pble resources
- */
-void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
-{
- struct list_head *clist;
- struct list_head *tlist;
- struct i40iw_chunk *chunk;
- struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;
-
- if (pinfo->pool) {
- list_for_each_safe(clist, tlist, &pinfo->clist) {
- chunk = list_entry(clist, struct i40iw_chunk, list);
- if (chunk->type == I40IW_VMALLOC)
- i40iw_free_vmalloc_mem(dev->hw, chunk);
- kfree(chunk);
- }
- gen_pool_destroy(pinfo->pool);
- }
-}
-
-/**
- * i40iw_hmc_init_pble - Initialize pble resources during module load
- * @dev: i40iw_sc_dev struct
- * @pble_rsrc: pble resources
- */
-enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc)
-{
- struct i40iw_hmc_info *hmc_info;
- u32 fpm_idx = 0;
-
- hmc_info = dev->hmc_info;
- pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
-	/* Now start the pbles on a 4k boundary */
- if (pble_rsrc->fpm_base_addr & 0xfff)
- fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
-
- pble_rsrc->unallocated_pble =
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
- pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
-
- pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
- pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
- INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
- if (!pble_rsrc->pinfo.pool)
- goto error;
-
- if (add_pble_pool(dev, pble_rsrc))
- goto error;
-
- return 0;
-
- error:	i40iw_destroy_pble_pool(dev, pble_rsrc);
- return I40IW_ERR_NO_MEMORY;
-}
-
-/**
- * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
- * @pble_rsrc: structure containing fpm address
- * @idx: where to return indexes
- */
-static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct sd_pd_idx *idx)
-{
- idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
- idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
- idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
-}
-
-/**
- * add_sd_direct - add sd direct for pble
- * @dev: hardware control device structure
- * @pble_rsrc: pble resource ptr
- * @info: page info for sd
- */
-static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_add_page_info *info)
-{
- enum i40iw_status_code ret_code = 0;
- struct sd_pd_idx *idx = &info->idx;
- struct i40iw_chunk *chunk = info->chunk;
- struct i40iw_hmc_info *hmc_info = info->hmc_info;
- struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
- u32 offset = 0;
-
- if (!sd_entry->valid) {
- if (dev->is_pf) {
- ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
- info->idx.sd_idx,
- I40IW_SD_TYPE_DIRECT,
- I40IW_HMC_DIRECT_BP_SIZE);
- if (ret_code)
- return ret_code;
- chunk->type = I40IW_DMA_COHERENT;
- }
- }
- offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
- chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
- chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
- chunk->fpm_addr = pble_rsrc->next_fpm_addr;
- i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
- chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
- return 0;
-}
-
-/**
- * i40iw_free_vmalloc_mem - free vmalloc during close
- * @hw: hw struct
- * @chunk: chunk information for vmalloc
- */
-static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
-{
- struct pci_dev *pcidev = hw->pcidev;
- int i;
-
- if (!chunk->pg_cnt)
- goto done;
- for (i = 0; i < chunk->pg_cnt; i++)
- dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-
- done:
- kfree(chunk->dmaaddrs);
- chunk->dmaaddrs = NULL;
- vfree(chunk->vaddr);
- chunk->vaddr = NULL;
- chunk->type = 0;
-}
-
-/**
- * i40iw_get_vmalloc_mem - get 2M page for sd
- * @hw: hw struct
- * @chunk: chunk to add
- * @pg_cnt: number of 4K pages
- */
-static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
- struct i40iw_chunk *chunk,
- int pg_cnt)
-{
- struct pci_dev *pcidev = hw->pcidev;
- struct page *page;
- u8 *addr;
- u32 size;
- int i;
-
- chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
- if (!chunk->dmaaddrs)
- return I40IW_ERR_NO_MEMORY;
- size = PAGE_SIZE * pg_cnt;
- chunk->vaddr = vmalloc(size);
- if (!chunk->vaddr) {
- kfree(chunk->dmaaddrs);
- chunk->dmaaddrs = NULL;
- return I40IW_ERR_NO_MEMORY;
- }
- chunk->size = size;
- addr = (u8 *)chunk->vaddr;
- for (i = 0; i < pg_cnt; i++) {
- page = vmalloc_to_page((void *)addr);
- if (!page)
- break;
- chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
- break;
- addr += PAGE_SIZE;
- }
-
- chunk->pg_cnt = i;
- chunk->type = I40IW_VMALLOC;
- if (i == pg_cnt)
- return 0;
-
- i40iw_free_vmalloc_mem(hw, chunk);
- return I40IW_ERR_NO_MEMORY;
-}
-
-/**
- * fpm_to_idx - given fpm address, get pble index
- * @pble_rsrc: pble resource management
- * @addr: fpm address for index
- */
-static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
-{
- return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
-}
-
-/**
- * add_bp_pages - add backing pages for sd
- * @dev: hardware control device structure
- * @pble_rsrc: pble resource management
- * @info: page info for sd
- */
-static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_add_page_info *info)
-{
- u8 *addr;
- struct i40iw_dma_mem mem;
- struct i40iw_hmc_pd_entry *pd_entry;
- struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
- struct i40iw_hmc_info *hmc_info = info->hmc_info;
- struct i40iw_chunk *chunk = info->chunk;
- struct i40iw_manage_vf_pble_info vf_pble_info;
- enum i40iw_status_code status = 0;
- u32 rel_pd_idx = info->idx.rel_pd_idx;
- u32 pd_idx = info->idx.pd_idx;
- u32 i;
-
- status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
- if (status)
- return I40IW_ERR_NO_MEMORY;
- status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
- info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
- I40IW_HMC_DIRECT_BP_SIZE);
- if (status)
- goto error;
- if (!dev->is_pf) {
- status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
- fpm_to_idx(pble_rsrc,
- pble_rsrc->next_fpm_addr),
- (info->pages << PBLE_512_SHIFT));
- if (status) {
-			i40iw_pr_err("failed to allocate PBLEs in the PF. Error %i\n", status);
- goto error;
- }
- }
- addr = chunk->vaddr;
- for (i = 0; i < info->pages; i++) {
- mem.pa = chunk->dmaaddrs[i];
- mem.size = PAGE_SIZE;
- mem.va = (void *)(addr);
- pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
- if (!pd_entry->valid) {
- status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
- if (status)
- goto error;
- addr += PAGE_SIZE;
- } else {
- i40iw_pr_err("pd entry is valid expecting to be invalid\n");
- }
- }
- if (!dev->is_pf) {
- vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
- vf_pble_info.inv_pd_ent = false;
- vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
- vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
- vf_pble_info.sd_index = info->idx.sd_idx;
- status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
- &vf_pble_info, true);
- if (status) {
- i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
- goto error;
- }
- }
- chunk->fpm_addr = pble_rsrc->next_fpm_addr;
- return 0;
-error:
- i40iw_free_vmalloc_mem(dev->hw, chunk);
- return status;
-}
-
-/**
- * add_pble_pool - add an sd entry for pble resource
- * @dev: hardware control device structure
- * @pble_rsrc: pble resource management
- */
-static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc)
-{
- struct i40iw_hmc_sd_entry *sd_entry;
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_chunk *chunk;
- struct i40iw_add_page_info info;
- struct sd_pd_idx *idx = &info.idx;
- enum i40iw_status_code ret_code = 0;
- enum i40iw_sd_entry_type sd_entry_type;
- u64 sd_reg_val = 0;
- u32 pages;
-
- if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
- return I40IW_ERR_NO_MEMORY;
- if (pble_rsrc->next_fpm_addr & 0xfff) {
- i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
- return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
- }
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk)
- return I40IW_ERR_NO_MEMORY;
- hmc_info = dev->hmc_info;
- chunk->fpm_addr = pble_rsrc->next_fpm_addr;
- get_sd_pd_idx(pble_rsrc, idx);
- sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
- pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
- idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
- pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
- info.chunk = chunk;
- info.hmc_info = hmc_info;
- info.pages = pages;
- info.sd_entry = sd_entry;
- if (!sd_entry->valid) {
- sd_entry_type = (!idx->rel_pd_idx &&
- (pages == I40IW_HMC_PD_CNT_IN_SD) &&
- dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
- } else {
- sd_entry_type = sd_entry->entry_type;
- }
- i40iw_debug(dev, I40IW_DEBUG_PBLE,
- "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
- pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
- i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
- sd_entry_type, sd_entry->valid);
-
- if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
- ret_code = add_sd_direct(dev, pble_rsrc, &info);
- if (ret_code)
- sd_entry_type = I40IW_SD_TYPE_PAGED;
- else
- pble_rsrc->stats_direct_sds++;
-
- if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
- ret_code = add_bp_pages(dev, pble_rsrc, &info);
- if (ret_code)
- goto error;
- else
- pble_rsrc->stats_paged_sds++;
- }
-
- if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
- (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
-		i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
- ret_code = I40IW_ERR_NO_MEMORY;
- goto error;
- }
- pble_rsrc->next_fpm_addr += chunk->size;
- i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
- pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
- pble_rsrc->unallocated_pble -= (chunk->size >> 3);
- sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
- sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
- if (dev->is_pf && !sd_entry->valid) {
- ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
- sd_reg_val, idx->sd_idx,
- sd_entry->entry_type, true);
- if (ret_code) {
- i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
- goto error;
- }
- }
-
- sd_entry->valid = true;
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
- return 0;
- error:
- kfree(chunk);
- return ret_code;
-}
-
-/**
- * free_lvl2 - free level 2 pble
- * @pble_rsrc: pble resource management
- * @palloc: level 2 pble allocation
- */
-static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc)
-{
- u32 i;
- struct gen_pool *pool;
- struct i40iw_pble_level2 *lvl2 = &palloc->level2;
- struct i40iw_pble_info *root = &lvl2->root;
- struct i40iw_pble_info *leaf = lvl2->leaf;
-
- pool = pble_rsrc->pinfo.pool;
-
- for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
- if (leaf->addr)
- gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
- else
- break;
- }
-
- if (root->addr)
- gen_pool_free(pool, root->addr, (root->cnt << 3));
-
- kfree(lvl2->leaf);
- lvl2->leaf = NULL;
-}
-
-/**
- * get_lvl2_pble - get level 2 pble resource
- * @pble_rsrc: pble resource management
- * @palloc: level 2 pble allocation
- * @pool: pool pointer
- */
-static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc,
- struct gen_pool *pool)
-{
- u32 lf4k, lflast, total, i;
- u32 pblcnt = PBLE_PER_PAGE;
- u64 *addr;
- struct i40iw_pble_level2 *lvl2 = &palloc->level2;
- struct i40iw_pble_info *root = &lvl2->root;
- struct i40iw_pble_info *leaf;
-
-	/* number of full 512-pble (4K) leaves */
- lf4k = palloc->total_cnt >> 9;
- lflast = palloc->total_cnt % PBLE_PER_PAGE;
- total = (lflast == 0) ? lf4k : lf4k + 1;
- lvl2->leaf_cnt = total;
-
- leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
- if (!leaf)
- return I40IW_ERR_NO_MEMORY;
- lvl2->leaf = leaf;
- /* allocate pbles for the root */
- root->addr = gen_pool_alloc(pool, (total << 3));
- if (!root->addr) {
- kfree(lvl2->leaf);
- lvl2->leaf = NULL;
- return I40IW_ERR_NO_MEMORY;
- }
- root->idx = fpm_to_idx(pble_rsrc,
- (u64)gen_pool_virt_to_phys(pool, root->addr));
- root->cnt = total;
- addr = (u64 *)root->addr;
- for (i = 0; i < total; i++, leaf++) {
- pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
- leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
- if (!leaf->addr)
- goto error;
- leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
-
- leaf->cnt = pblcnt;
- *addr = (u64)leaf->idx;
- addr++;
- }
- palloc->level = I40IW_LEVEL_2;
- pble_rsrc->stats_lvl2++;
- return 0;
- error:
- free_lvl2(pble_rsrc, palloc);
- return I40IW_ERR_NO_MEMORY;
-}
-
-/**
- * get_lvl1_pble - get level 1 pble resource
- * @dev: hardware control device structure
- * @pble_rsrc: pble resource management
- * @palloc: level 1 pble allocation
- */
-static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc)
-{
- u64 *addr;
- struct gen_pool *pool;
- struct i40iw_pble_info *lvl1 = &palloc->level1;
-
- pool = pble_rsrc->pinfo.pool;
- addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
-
- if (!addr)
- return I40IW_ERR_NO_MEMORY;
-
- palloc->level = I40IW_LEVEL_1;
- lvl1->addr = (unsigned long)addr;
- lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
- (unsigned long)addr));
- lvl1->cnt = palloc->total_cnt;
- pble_rsrc->stats_lvl1++;
- return 0;
-}
-
-/**
- * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routines
- * @dev: i40iw_sc_dev struct
- * @pble_rsrc: pble resources
- * @palloc: contains all information regarding pble (idx + pble addr)
- * @pool: pointer to general purpose special memory pool descriptor
- */
-static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc,
- struct gen_pool *pool)
-{
- enum i40iw_status_code status = 0;
-
- status = get_lvl1_pble(dev, pble_rsrc, palloc);
- if (status && (palloc->total_cnt > PBLE_PER_PAGE))
- status = get_lvl2_pble(pble_rsrc, palloc, pool);
- return status;
-}
-
-/**
- * i40iw_get_pble - allocate pbles from the pool
- * @dev: i40iw_sc_dev struct
- * @pble_rsrc: pble resources
- * @palloc: contains all information regarding pble (idx + pble addr)
- * @pble_cnt: number of pbles requested
- */
-enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc,
- u32 pble_cnt)
-{
- struct gen_pool *pool;
- enum i40iw_status_code status = 0;
- u32 max_sds = 0;
- int i;
-
- pool = pble_rsrc->pinfo.pool;
- palloc->total_cnt = pble_cnt;
- palloc->level = I40IW_LEVEL_0;
-	/* check first to see if we can get pbles without acquiring additional sds */
- status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
- if (!status)
- goto exit;
- max_sds = (palloc->total_cnt >> 18) + 1;
- for (i = 0; i < max_sds; i++) {
- status = add_pble_pool(dev, pble_rsrc);
- if (status)
- break;
- status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
- if (!status)
- break;
- }
-exit:
- if (!status)
- pble_rsrc->stats_alloc_ok++;
- else
- pble_rsrc->stats_alloc_fail++;
-
- return status;
-}
-
-/**
- * i40iw_free_pble - put pbles back into pool
- * @pble_rsrc: pble resources
- * @palloc: contains all information regarding pble resource being freed
- */
-void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc)
-{
- struct gen_pool *pool;
-
- pool = pble_rsrc->pinfo.pool;
- if (palloc->level == I40IW_LEVEL_2)
- free_lvl2(pble_rsrc, palloc);
- else
- gen_pool_free(pool, palloc->level1.addr,
- (palloc->level1.cnt << 3));
- pble_rsrc->stats_alloc_freed++;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h
deleted file mode 100644
index 7b1851d21cc0..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_PBLE_H
-#define I40IW_PBLE_H
-
-#define POOL_SHIFT 6
-#define PBLE_PER_PAGE 512
-#define I40IW_HMC_PAGED_BP_SHIFT 12
-#define PBLE_512_SHIFT 9
-
-enum i40iw_pble_level {
- I40IW_LEVEL_0 = 0,
- I40IW_LEVEL_1 = 1,
- I40IW_LEVEL_2 = 2
-};
-
-enum i40iw_alloc_type {
- I40IW_NO_ALLOC = 0,
- I40IW_DMA_COHERENT = 1,
- I40IW_VMALLOC = 2
-};
-
-struct i40iw_pble_info {
- unsigned long addr;
- u32 idx;
- u32 cnt;
-};
-
-struct i40iw_pble_level2 {
- struct i40iw_pble_info root;
- struct i40iw_pble_info *leaf;
- u32 leaf_cnt;
-};
-
-struct i40iw_pble_alloc {
- u32 total_cnt;
- enum i40iw_pble_level level;
- union {
- struct i40iw_pble_info level1;
- struct i40iw_pble_level2 level2;
- };
-};
-
-struct sd_pd_idx {
- u32 sd_idx;
- u32 pd_idx;
- u32 rel_pd_idx;
-};
-
-struct i40iw_add_page_info {
- struct i40iw_chunk *chunk;
- struct i40iw_hmc_sd_entry *sd_entry;
- struct i40iw_hmc_info *hmc_info;
- struct sd_pd_idx idx;
- u32 pages;
-};
-
-struct i40iw_chunk {
- struct list_head list;
- u32 size;
- void *vaddr;
- u64 fpm_addr;
- u32 pg_cnt;
- dma_addr_t *dmaaddrs;
- enum i40iw_alloc_type type;
-};
-
-struct i40iw_pble_pool {
- struct gen_pool *pool;
- struct list_head clist;
- u32 total_pble_alloc;
- u32 free_pble_cnt;
- u32 pool_shift;
-};
-
-struct i40iw_hmc_pble_rsrc {
- u32 unallocated_pble;
- u64 fpm_base_addr;
- u64 next_fpm_addr;
- struct i40iw_pble_pool pinfo;
-
- u32 stats_direct_sds;
- u32 stats_paged_sds;
- u64 stats_alloc_ok;
- u64 stats_alloc_fail;
- u64 stats_alloc_freed;
- u64 stats_lvl1;
- u64 stats_lvl2;
-};
-
-void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
-enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc);
-void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
-enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_pble_rsrc *pble_rsrc,
- struct i40iw_pble_alloc *palloc,
- u32 pble_cnt);
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
deleted file mode 100644
index 88fb68e866ba..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ /dev/null
@@ -1,1496 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_status.h"
-#include "i40iw_hmc.h"
-
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include "i40iw_puda.h"
-
-static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
- struct i40iw_puda_buf *buf);
-static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
-static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
-static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
- *rsrc, bool initial);
-/**
- * i40iw_puda_get_listbuf - get buffer from puda list
- * @list: list to use for buffers (ILQ or IEQ)
- */
-static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
-{
- struct i40iw_puda_buf *buf = NULL;
-
- if (!list_empty(list)) {
- buf = (struct i40iw_puda_buf *)list->next;
- list_del((struct list_head *)&buf->list);
- }
- return buf;
-}
-
-/**
- * i40iw_puda_get_bufpool - return buffer from resource
- * @rsrc: resource to use for buffer
- */
-struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
-{
- struct i40iw_puda_buf *buf = NULL;
- struct list_head *list = &rsrc->bufpool;
- unsigned long flags;
-
- spin_lock_irqsave(&rsrc->bufpool_lock, flags);
- buf = i40iw_puda_get_listbuf(list);
- if (buf)
- rsrc->avail_buf_count--;
- else
- rsrc->stats_buf_alloc_fail++;
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- return buf;
-}
-
-/**
- * i40iw_puda_ret_bufpool - return buffer to rsrc list
- * @rsrc: resource to use for buffer
- * @buf: buffer to return to resource
- */
-void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
- struct i40iw_puda_buf *buf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rsrc->bufpool_lock, flags);
- list_add(&buf->list, &rsrc->bufpool);
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- rsrc->avail_buf_count++;
-}
-
-/**
- * i40iw_puda_post_recvbuf - set wqe for rcv buffer
- * @rsrc: resource ptr
- * @wqe_idx: wqe index to use
- * @buf: puda buffer for rcv q
- * @initial: flag if during init time
- */
-static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
- struct i40iw_puda_buf *buf, bool initial)
-{
- u64 *wqe;
- struct i40iw_sc_qp *qp = &rsrc->qp;
- u64 offset24 = 0;
-
- qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
- wqe = qp->qp_uk.rq_base[wqe_idx].elem;
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
- wqe_idx, buf, wqe);
- if (!initial)
- get_64bit_val(wqe, 24, &offset24);
-
- offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
-
- set_64bit_val(wqe, 0, buf->mem.pa);
- set_64bit_val(wqe, 8,
- LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
- i40iw_insert_wqe_hdr(wqe, offset24);
-}
-
-/**
- * i40iw_puda_replenish_rq - post rcv buffers
- * @rsrc: resource to use for buffer
- * @initial: flag if during init time
- */
-static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
- bool initial)
-{
- u32 i;
- u32 invalid_cnt = rsrc->rxq_invalid_cnt;
- struct i40iw_puda_buf *buf = NULL;
-
- for (i = 0; i < invalid_cnt; i++) {
- buf = i40iw_puda_get_bufpool(rsrc);
- if (!buf)
- return I40IW_ERR_list_empty;
- i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
- initial);
- rsrc->rx_wqe_idx =
- ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
- rsrc->rxq_invalid_cnt--;
- }
- return 0;
-}
-
-/**
- * i40iw_puda_alloc_buf - allocate mem for buffer
- * @dev: iwarp device
- * @length: length of buffer
- */
-static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
- u32 length)
-{
- struct i40iw_puda_buf *buf = NULL;
- struct i40iw_virt_mem buf_mem;
- enum i40iw_status_code ret;
-
- ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
- sizeof(struct i40iw_puda_buf));
- if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s: error mem for buf\n", __func__);
- return NULL;
- }
- buf = (struct i40iw_puda_buf *)buf_mem.va;
- ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
- if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s: error dma mem for buf\n", __func__);
- i40iw_free_virt_mem(dev->hw, &buf_mem);
- return NULL;
- }
- buf->buf_mem.va = buf_mem.va;
- buf->buf_mem.size = buf_mem.size;
- return buf;
-}
-
-/**
- * i40iw_puda_dele_buf - free buffer back to system
- * @dev: iwarp device
- * @buf: buffer to free
- */
-static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
- struct i40iw_puda_buf *buf)
-{
- i40iw_free_dma_mem(dev->hw, &buf->mem);
- i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
-}
-
-/**
- * i40iw_puda_get_next_send_wqe - return next wqe for processing
- * @qp: puda qp for wqe
- * @wqe_idx: wqe index for caller
- */
-static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
-{
- u64 *wqe = NULL;
- enum i40iw_status_code ret_code = 0;
-
- *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- if (!*wqe_idx)
- qp->swqe_polarity = !qp->swqe_polarity;
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- if (ret_code)
- return wqe;
- wqe = qp->sq_base[*wqe_idx].elem;
-
- return wqe;
-}
-
-/**
- * i40iw_puda_poll_info - poll cq for completion
- * @cq: cq for poll
- * @info: info return for successful completion
- */
-static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
- struct i40iw_puda_completion_info *info)
-{
- u64 qword0, qword2, qword3;
- u64 *cqe;
- u64 comp_ctx;
- bool valid_bit;
- u32 major_err, minor_err;
- bool error;
-
- cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
- get_64bit_val(cqe, 24, &qword3);
- valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
-
- if (valid_bit != cq->cq_uk.polarity)
- return I40IW_ERR_QUEUE_EMPTY;
-
- i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
- error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
- if (error) {
- i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
- major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
- minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
- info->compl_error = major_err << 16 | minor_err;
- return I40IW_ERR_CQ_COMPL_ERROR;
- }
-
- get_64bit_val(cqe, 0, &qword0);
- get_64bit_val(cqe, 16, &qword2);
-
- info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
- info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
-
- get_64bit_val(cqe, 8, &comp_ctx);
- info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
- info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
-
- if (info->q_type == I40IW_CQE_QTYPE_RQ) {
- info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
- info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
- info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
- info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
- }
-
- return 0;
-}
-
-/**
- * i40iw_puda_poll_completion - processes completion for cq
- * @dev: iwarp device
- * @cq: cq getting interrupt
- * @compl_err: return any completion err
- */
-enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
- struct i40iw_sc_cq *cq, u32 *compl_err)
-{
- struct i40iw_qp_uk *qp;
- struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
- struct i40iw_puda_completion_info info;
- enum i40iw_status_code ret = 0;
- struct i40iw_puda_buf *buf;
- struct i40iw_puda_rsrc *rsrc;
- void *sqwrid;
- u8 cq_type = cq->cq_type;
- unsigned long flags;
-
- if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
- rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
- } else {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
- return I40IW_ERR_BAD_PTR;
- }
- memset(&info, 0, sizeof(info));
- ret = i40iw_puda_poll_info(cq, &info);
- *compl_err = info.compl_error;
- if (ret == I40IW_ERR_QUEUE_EMPTY)
- return ret;
- if (ret)
- goto done;
-
- qp = info.qp;
- if (!qp || !rsrc) {
- ret = I40IW_ERR_BAD_PTR;
- goto done;
- }
-
- if (qp->qp_id != rsrc->qp_id) {
- ret = I40IW_ERR_BAD_PTR;
- goto done;
- }
-
- if (info.q_type == I40IW_CQE_QTYPE_RQ) {
- buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
- /* Get all the tcpip information in the buf header */
- ret = i40iw_puda_get_tcpip_info(&info, buf);
- if (ret) {
- rsrc->stats_rcvd_pkt_err++;
- if (cq_type == I40IW_CQ_TYPE_ILQ) {
- i40iw_ilq_putback_rcvbuf(&rsrc->qp,
- info.wqe_idx);
- } else {
- i40iw_puda_ret_bufpool(rsrc, buf);
- i40iw_puda_replenish_rq(rsrc, false);
- }
- goto done;
- }
-
- rsrc->stats_pkt_rcvd++;
- rsrc->compl_rxwqe_idx = info.wqe_idx;
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
- rsrc->receive(rsrc->vsi, buf);
- if (cq_type == I40IW_CQ_TYPE_ILQ)
- i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
- else
- i40iw_puda_replenish_rq(rsrc, false);
-
- } else {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
- sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
- I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
- rsrc->xmit_complete(rsrc->vsi, sqwrid);
- spin_lock_irqsave(&rsrc->bufpool_lock, flags);
- rsrc->tx_wqe_avail_cnt++;
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- if (!list_empty(&rsrc->txpend))
- i40iw_puda_send_buf(rsrc, NULL);
- }
-
-done:
- I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
- if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
- cq_uk->polarity = !cq_uk->polarity;
- /* update cq tail in cq shadow memory also */
- I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
- set_64bit_val(cq_uk->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
- return 0;
-}
-
-/**
- * i40iw_puda_send - complete send wqe for transmit
- * @qp: puda qp for send
- * @info: buffer information for transmit
- */
-enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
- struct i40iw_puda_send_info *info)
-{
- u64 *wqe;
- u32 iplen, l4len;
- u64 header[2];
- u32 wqe_idx;
- u8 iipt;
-
-	/* number of 32-bit DWORDs in header */
- l4len = info->tcplen >> 2;
- if (info->ipv4) {
- iipt = 3;
- iplen = 5;
- } else {
- iipt = 1;
- iplen = 10;
- }
-
- wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
- /* Third line of WQE descriptor */
- /* maclen is in words */
- header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
- LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
- LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
- LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
-	/* Fourth line of WQE descriptor */
- header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
- LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
- LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
- LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
-
- set_64bit_val(wqe, 0, info->paddr);
- set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
- set_64bit_val(wqe, 16, header[0]);
-
- i40iw_insert_wqe_hdr(wqe, header[1]);
-
- i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
- i40iw_qp_post_wr(&qp->qp_uk);
- return 0;
-}
-
-/**
- * i40iw_puda_send_buf - transmit puda buffer
- * @rsrc: resource to use for buffer
- * @buf: puda buffer to transmit
- */
-void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
-{
- struct i40iw_puda_send_info info;
- enum i40iw_status_code ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&rsrc->bufpool_lock, flags);
-	/* if no wqe is available, or this is not from a completion and
-	 * we have pending buffers, we must queue the new buffer
-	 */
- if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
- list_add_tail(&buf->list, &rsrc->txpend);
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- rsrc->stats_sent_pkt_q++;
- if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s: adding to txpend\n", __func__);
- return;
- }
- rsrc->tx_wqe_avail_cnt--;
-	/* if we are coming from a completion and have pending buffers
-	 * then get one from the pending list
-	 */
- if (!buf) {
- buf = i40iw_puda_get_listbuf(&rsrc->txpend);
- if (!buf)
- goto done;
- }
-
- info.scratch = (void *)buf;
- info.paddr = buf->mem.pa;
- info.len = buf->totallen;
- info.tcplen = buf->tcphlen;
- info.maclen = buf->maclen;
- info.ipv4 = buf->ipv4;
- info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
-
- ret = i40iw_puda_send(&rsrc->qp, &info);
- if (ret) {
- rsrc->tx_wqe_avail_cnt++;
- rsrc->stats_sent_pkt_q++;
- list_add(&buf->list, &rsrc->txpend);
- if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s: adding to puda_send\n", __func__);
- } else {
- rsrc->stats_pkt_sent++;
- }
-done:
- spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
-}
-
-/**
- * i40iw_puda_qp_setctx - during init, set qp's context
- * @rsrc: qp's resource
- */
-static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
-{
- struct i40iw_sc_qp *qp = &rsrc->qp;
- u64 *qp_ctx = qp->hw_host_ctx;
-
- set_64bit_val(qp_ctx, 8, qp->sq_pa);
- set_64bit_val(qp_ctx, 16, qp->rq_pa);
-
- set_64bit_val(qp_ctx, 24,
- LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
- LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
-
- set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
- set_64bit_val(qp_ctx, 56, 0);
- set_64bit_val(qp_ctx, 64, 1);
-
- set_64bit_val(qp_ctx, 136,
- LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
- LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
-
- set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
-
- set_64bit_val(qp_ctx, 168,
- LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
-
- set_64bit_val(qp_ctx, 176,
- LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
- LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
- LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
-
- i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
- qp_ctx, I40IW_QP_CTX_SIZE);
-}
-
-/**
- * i40iw_puda_qp_wqe - setup wqe for qp create
- * @dev: iwarp device
- * @qp: resource for qp
- */
-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_sc_cqp *cqp;
- u64 *wqe;
- u64 header;
- struct i40iw_ccq_cqe_info compl_info;
- enum i40iw_status_code status = 0;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
- set_64bit_val(wqe, 40, qp->shadow_area_pa);
- header = qp->qp_uk.qp_id |
- LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
- LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
- i40iw_sc_cqp_post_sq(cqp);
- status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_CREATE_QP,
- &compl_info);
- return status;
-}
-
-/**
- * i40iw_puda_qp_create - create qp for resource
- * @rsrc: resource to use for buffer
- */
-static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
-{
- struct i40iw_sc_qp *qp = &rsrc->qp;
- struct i40iw_qp_uk *ukqp = &qp->qp_uk;
- enum i40iw_status_code ret = 0;
- u32 sq_size, rq_size, t_size;
- struct i40iw_dma_mem *mem;
-
- sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
- rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
- t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
- I40IW_QP_CTX_SIZE);
- /* Get page aligned memory */
- ret =
- i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
- I40IW_HW_PAGE_SIZE);
- if (ret) {
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
- return ret;
- }
-
- mem = &rsrc->qpmem;
- memset(mem->va, 0, t_size);
- qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
- qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
- qp->pd = &rsrc->sc_pd;
- qp->qp_type = I40IW_QP_TYPE_UDA;
- qp->dev = rsrc->dev;
- qp->back_qp = (void *)rsrc;
- qp->sq_pa = mem->pa;
- qp->rq_pa = qp->sq_pa + sq_size;
- qp->vsi = rsrc->vsi;
- ukqp->sq_base = mem->va;
- ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
- ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
- qp->shadow_area_pa = qp->rq_pa + rq_size;
- qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
- qp->hw_host_ctx_pa =
- qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
- ukqp->qp_id = rsrc->qp_id;
- ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
- ukqp->rq_wrid_array = rsrc->rq_wrid_array;
-
- ukqp->qp_id = rsrc->qp_id;
- ukqp->sq_size = rsrc->sq_size;
- ukqp->rq_size = rsrc->rq_size;
-
- I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
- I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
- I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
-
- if (qp->pd->dev->is_pf)
- ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
- I40E_PFPE_WQEALLOC);
- else
- ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
- I40E_VFPE_WQEALLOC1);
-
- qp->user_pri = 0;
- i40iw_qp_add_qos(qp);
- i40iw_puda_qp_setctx(rsrc);
- if (rsrc->dev->ceq_valid)
- ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
- else
- ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
- if (ret) {
- i40iw_qp_rem_qos(qp);
- i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
- }
- return ret;
-}
-
-/**
- * i40iw_puda_cq_wqe - setup wqe for cq create
- * @dev: iwarp device
- * @cq: cq to setup
- */
-static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
-{
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
- struct i40iw_ccq_cqe_info compl_info;
- enum i40iw_status_code status = 0;
-
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe, 16,
- LS_64(cq->shadow_read_threshold,
- I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
- set_64bit_val(wqe, 32, cq->cq_pa);
-
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
-
- header = cq->cq_uk.cq_id |
- LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- i40iw_insert_wqe_hdr(wqe, header);
-
- i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
-
- i40iw_sc_cqp_post_sq(dev->cqp);
- status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_CREATE_CQ,
- &compl_info);
- return status;
-}
-
-/**
- * i40iw_puda_cq_create - create cq for resource
- * @rsrc: resource for which cq to create
- */
-static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
-{
- struct i40iw_sc_dev *dev = rsrc->dev;
- struct i40iw_sc_cq *cq = &rsrc->cq;
- enum i40iw_status_code ret = 0;
- u32 tsize, cqsize;
- struct i40iw_dma_mem *mem;
- struct i40iw_cq_init_info info;
- struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
-
- cq->vsi = rsrc->vsi;
- cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
- tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
- ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
- I40IW_CQ0_ALIGNMENT);
- if (ret)
- return ret;
-
- mem = &rsrc->cqmem;
- memset(&info, 0, sizeof(info));
- info.dev = dev;
- info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
- I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
- info.shadow_read_threshold = rsrc->cq_size >> 2;
- info.ceq_id_valid = true;
- info.cq_base_pa = mem->pa;
- info.shadow_area_pa = mem->pa + cqsize;
- init_info->cq_base = mem->va;
- init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
- init_info->cq_size = rsrc->cq_size;
- init_info->cq_id = rsrc->cq_id;
- info.ceqe_mask = true;
- info.ceq_id_valid = true;
- ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
- if (ret)
- goto error;
- if (rsrc->dev->ceq_valid)
- ret = i40iw_cqp_cq_create_cmd(dev, cq);
- else
- ret = i40iw_puda_cq_wqe(dev, cq);
-error:
- if (ret)
- i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
- return ret;
-}
-
-/**
- * i40iw_puda_free_qp - free qp for resource
- * @rsrc: resource for which qp to free
- */
-static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
-{
- enum i40iw_status_code ret;
- struct i40iw_ccq_cqe_info compl_info;
- struct i40iw_sc_dev *dev = rsrc->dev;
-
- if (rsrc->dev->ceq_valid) {
- i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
- return;
- }
-
- ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
- 0, false, true, true);
- if (ret)
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s error puda qp destroy wqe\n",
- __func__);
-
- if (!ret) {
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_QP,
- &compl_info);
- if (ret)
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s error puda qp destroy failed\n",
- __func__);
- }
-}
-
-/**
- * i40iw_puda_free_cq - free cq for resource
- * @rsrc: resource for which cq to free
- */
-static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
-{
- enum i40iw_status_code ret;
- struct i40iw_ccq_cqe_info compl_info;
- struct i40iw_sc_dev *dev = rsrc->dev;
-
- if (rsrc->dev->ceq_valid) {
- i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
- return;
- }
- ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
-
- if (ret)
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s error ieq cq destroy\n",
- __func__);
-
- if (!ret) {
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_CQ,
- &compl_info);
- if (ret)
- i40iw_debug(dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy done\n",
- __func__);
- }
-}
-
-/**
- * i40iw_puda_dele_resources - delete all resources during close
- * @vsi: pointer to vsi structure
- * @type: type of resource to delete
- * @reset: true if reset chip
- */
-void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
- enum puda_resource_type type,
- bool reset)
-{
- struct i40iw_sc_dev *dev = vsi->dev;
- struct i40iw_puda_rsrc *rsrc;
- struct i40iw_puda_buf *buf = NULL;
- struct i40iw_puda_buf *nextbuf = NULL;
- struct i40iw_virt_mem *vmem;
-
- switch (type) {
- case I40IW_PUDA_RSRC_TYPE_ILQ:
- rsrc = vsi->ilq;
- vmem = &vsi->ilq_mem;
- break;
- case I40IW_PUDA_RSRC_TYPE_IEQ:
- rsrc = vsi->ieq;
- vmem = &vsi->ieq_mem;
- break;
- default:
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
- __func__, type);
- return;
- }
-
- switch (rsrc->completion) {
- case PUDA_HASH_CRC_COMPLETE:
- i40iw_free_hash_desc(rsrc->hash_desc);
- fallthrough;
- case PUDA_QP_CREATED:
- if (!reset)
- i40iw_puda_free_qp(rsrc);
-
- i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
- fallthrough;
- case PUDA_CQ_CREATED:
- if (!reset)
- i40iw_puda_free_cq(rsrc);
-
- i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
- break;
- default:
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
- break;
- }
- /* Free all allocated puda buffers for both tx and rx */
- buf = rsrc->alloclist;
- while (buf) {
- nextbuf = buf->next;
- i40iw_puda_dele_buf(dev, buf);
- buf = nextbuf;
- rsrc->alloc_buf_count--;
- }
- i40iw_free_virt_mem(dev->hw, vmem);
-}
-
-/**
- * i40iw_puda_allocbufs - allocate buffers for resource
- * @rsrc: resource for buffer allocation
- * @count: number of buffers to create
- */
-static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
- u32 count)
-{
- u32 i;
- struct i40iw_puda_buf *buf;
- struct i40iw_puda_buf *nextbuf;
-
- for (i = 0; i < count; i++) {
- buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
- if (!buf) {
- rsrc->stats_buf_alloc_fail++;
- return I40IW_ERR_NO_MEMORY;
- }
- i40iw_puda_ret_bufpool(rsrc, buf);
- rsrc->alloc_buf_count++;
- if (!rsrc->alloclist) {
- rsrc->alloclist = buf;
- } else {
- nextbuf = rsrc->alloclist;
- rsrc->alloclist = buf;
- buf->next = nextbuf;
- }
- }
- rsrc->avail_buf_count = rsrc->alloc_buf_count;
- return 0;
-}
-
-/**
- * i40iw_puda_create_rsrc - create resource (ilq or ieq)
- * @vsi: pointer to vsi structure
- * @info: resource information
- */
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
- struct i40iw_puda_rsrc_info *info)
-{
- struct i40iw_sc_dev *dev = vsi->dev;
- enum i40iw_status_code ret = 0;
- struct i40iw_puda_rsrc *rsrc;
- u32 pudasize;
- u32 sqwridsize, rqwridsize;
- struct i40iw_virt_mem *vmem;
-
- info->count = 1;
- pudasize = sizeof(struct i40iw_puda_rsrc);
- sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
- rqwridsize = info->rq_size * 8;
- switch (info->type) {
- case I40IW_PUDA_RSRC_TYPE_ILQ:
- vmem = &vsi->ilq_mem;
- break;
- case I40IW_PUDA_RSRC_TYPE_IEQ:
- vmem = &vsi->ieq_mem;
- break;
- default:
- return I40IW_NOT_SUPPORTED;
- }
- ret =
- i40iw_allocate_virt_mem(dev->hw, vmem,
- pudasize + sqwridsize + rqwridsize);
- if (ret)
- return ret;
- rsrc = (struct i40iw_puda_rsrc *)vmem->va;
- spin_lock_init(&rsrc->bufpool_lock);
- if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
- vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
- vsi->ilq_count = info->count;
- rsrc->receive = info->receive;
- rsrc->xmit_complete = info->xmit_complete;
- } else {
- vmem = &vsi->ieq_mem;
- vsi->ieq_count = info->count;
- vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
- rsrc->receive = i40iw_ieq_receive;
- rsrc->xmit_complete = i40iw_ieq_tx_compl;
- }
-
- rsrc->type = info->type;
- rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
- rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
- /* Initialize all ieq lists */
- INIT_LIST_HEAD(&rsrc->bufpool);
- INIT_LIST_HEAD(&rsrc->txpend);
-
- rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
- dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
- rsrc->qp_id = info->qp_id;
- rsrc->cq_id = info->cq_id;
- rsrc->sq_size = info->sq_size;
- rsrc->rq_size = info->rq_size;
- rsrc->cq_size = info->rq_size + info->sq_size;
- rsrc->buf_size = info->buf_size;
- rsrc->dev = dev;
- rsrc->vsi = vsi;
-
- ret = i40iw_puda_cq_create(rsrc);
- if (!ret) {
- rsrc->completion = PUDA_CQ_CREATED;
- ret = i40iw_puda_qp_create(rsrc);
- }
- if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n",
- __func__);
- goto error;
- }
- rsrc->completion = PUDA_QP_CREATED;
-
- ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
- if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n",
- __func__);
- goto error;
- }
-
- rsrc->rxq_invalid_cnt = info->rq_size;
- ret = i40iw_puda_replenish_rq(rsrc, true);
- if (ret)
- goto error;
-
- if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
- if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
- rsrc->check_crc = true;
- rsrc->completion = PUDA_HASH_CRC_COMPLETE;
- ret = 0;
- }
- }
-
- dev->ccq_ops->ccq_arm(&rsrc->cq);
- return ret;
- error:
- i40iw_puda_dele_resources(vsi, info->type, false);
-
- return ret;
-}
-
-/**
- * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
- * @qp: ilq's qp resource
- * @wqe_idx: wqe index of completed rcvbuf
- */
-static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
-{
- u64 *wqe;
- u64 offset24;
-
- wqe = qp->qp_uk.rq_base[wqe_idx].elem;
- get_64bit_val(wqe, 24, &offset24);
- offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, offset24);
-}
-
-/**
- * i40iw_ieq_get_fpdu_length - given length return fpdu length
- * @length: length of fpdu
- */
-static u16 i40iw_ieq_get_fpdu_length(u16 length)
-{
- u16 fpdu_len;
-
- fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
- fpdu_len = (fpdu_len + 3) & 0xfffffffc;
- return fpdu_len;
-}
-
-/**
- * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
- * @buf: rcv buffer with partial
- * @txbuf: tx buffer for sending back
- * @buf_offset: rcv buffer offset to copy from
- * @txbuf_offset: offset in tx buf to copy to
- * @length: length of data to copy
- */
-static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
- struct i40iw_puda_buf *txbuf,
- u16 buf_offset, u32 txbuf_offset,
- u32 length)
-{
- void *mem1 = (u8 *)buf->mem.va + buf_offset;
- void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
-
- memcpy(mem2, mem1, length);
-}
-
-/**
- * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
- * @buf: receive buffer with partial
- * @txbuf: buffer to prepare
- */
-static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
- struct i40iw_puda_buf *txbuf)
-{
- txbuf->maclen = buf->maclen;
- txbuf->tcphlen = buf->tcphlen;
- txbuf->ipv4 = buf->ipv4;
- txbuf->hdrlen = buf->hdrlen;
- i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
-}
-
-/**
- * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
- * @buf: receive exception buffer
- * @fps: first partial sequence number
- */
-static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
-{
- u32 offset;
-
- if (buf->seqnum < fps) {
- offset = fps - buf->seqnum;
- if (offset > buf->datalen)
- return;
- buf->data += offset;
- buf->datalen -= (u16)offset;
- buf->seqnum = fps;
- }
-}
-
-/**
- * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
- * @ieq: ieq resource
- * @rxlist: ieq's received buffer list
- * @pbufl: temporary list for buffers for fpdu
- * @txbuf: tx buffer for fpdu
- * @fpdu_len: total length of fpdu
- */
-static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
- struct list_head *rxlist,
- struct list_head *pbufl,
- struct i40iw_puda_buf *txbuf,
- u16 fpdu_len)
-{
- struct i40iw_puda_buf *buf;
- u32 nextseqnum;
- u16 txoffset, bufoffset;
-
- buf = i40iw_puda_get_listbuf(pbufl);
- if (!buf)
- return;
- nextseqnum = buf->seqnum + fpdu_len;
- txbuf->totallen = buf->hdrlen + fpdu_len;
- txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
- i40iw_ieq_setup_tx_buf(buf, txbuf);
-
- txoffset = buf->hdrlen;
- bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
-
- do {
- if (buf->datalen >= fpdu_len) {
- /* copied full fpdu */
- i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
- buf->datalen -= fpdu_len;
- buf->data += fpdu_len;
- buf->seqnum = nextseqnum;
- break;
- }
- /* copy partial fpdu */
- i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
- txoffset += buf->datalen;
- fpdu_len -= buf->datalen;
- i40iw_puda_ret_bufpool(ieq, buf);
- buf = i40iw_puda_get_listbuf(pbufl);
- if (!buf)
- return;
- bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
- } while (1);
-
-	/* last buffer on the list */
- if (buf->datalen)
- list_add(&buf->list, rxlist);
- else
- i40iw_puda_ret_bufpool(ieq, buf);
-}
-
-/**
- * i40iw_ieq_create_pbufl - create buffer list for single fpdu
- * @pfpdu: partial management per user qp
- * @rxlist: resource list for receive ieq buffers
- * @pbufl: temp. list for buffers for fpdu
- * @buf: first receive buffer
- * @fpdu_len: total length of fpdu
- */
-static enum i40iw_status_code i40iw_ieq_create_pbufl(
- struct i40iw_pfpdu *pfpdu,
- struct list_head *rxlist,
- struct list_head *pbufl,
- struct i40iw_puda_buf *buf,
- u16 fpdu_len)
-{
- enum i40iw_status_code status = 0;
- struct i40iw_puda_buf *nextbuf;
- u32 nextseqnum;
- u16 plen = fpdu_len - buf->datalen;
- bool done = false;
-
- nextseqnum = buf->seqnum + buf->datalen;
- do {
- nextbuf = i40iw_puda_get_listbuf(rxlist);
- if (!nextbuf) {
- status = I40IW_ERR_list_empty;
- break;
- }
- list_add_tail(&nextbuf->list, pbufl);
- if (nextbuf->seqnum != nextseqnum) {
- pfpdu->bad_seq_num++;
- status = I40IW_ERR_SEQ_NUM;
- break;
- }
- if (nextbuf->datalen >= plen) {
- done = true;
- } else {
- plen -= nextbuf->datalen;
- nextseqnum = nextbuf->seqnum + nextbuf->datalen;
- }
-
- } while (!done);
-
- return status;
-}
-
-/**
- * i40iw_ieq_handle_partial - process partial fpdu buffer
- * @ieq: ieq resource
- * @pfpdu: partial management per user qp
- * @buf: receive buffer
- * @fpdu_len: fpdu len in the buffer
- */
-static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
- struct i40iw_pfpdu *pfpdu,
- struct i40iw_puda_buf *buf,
- u16 fpdu_len)
-{
- enum i40iw_status_code status = 0;
- u8 *crcptr;
- u32 mpacrc;
- u32 seqnum = buf->seqnum;
- struct list_head pbufl; /* partial buffer list */
- struct i40iw_puda_buf *txbuf = NULL;
- struct list_head *rxlist = &pfpdu->rxlist;
-
- INIT_LIST_HEAD(&pbufl);
- list_add(&buf->list, &pbufl);
-
- status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
- if (status)
- goto error;
-
- txbuf = i40iw_puda_get_bufpool(ieq);
- if (!txbuf) {
- pfpdu->no_tx_bufs++;
- status = I40IW_ERR_NO_TXBUFS;
- goto error;
- }
-
- i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
- i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
- crcptr = txbuf->data + fpdu_len - 4;
- mpacrc = *(u32 *)crcptr;
- if (ieq->check_crc) {
- status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
- (fpdu_len - 4), mpacrc);
- if (status) {
- i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
- "%s: error bad crc\n", __func__);
- goto error;
- }
- }
-
- i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
- txbuf->mem.va, txbuf->totallen);
- i40iw_puda_send_buf(ieq, txbuf);
- pfpdu->rcv_nxt = seqnum + fpdu_len;
- return status;
- error:
- while (!list_empty(&pbufl)) {
- buf = (struct i40iw_puda_buf *)(pbufl.prev);
- list_del(&buf->list);
- list_add(&buf->list, rxlist);
- }
- if (txbuf)
- i40iw_puda_ret_bufpool(ieq, txbuf);
- return status;
-}
-
-/**
- * i40iw_ieq_process_buf - process buffer rcvd for ieq
- * @ieq: ieq resource
- * @pfpdu: partial management per user qp
- * @buf: receive buffer
- */
-static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
- struct i40iw_pfpdu *pfpdu,
- struct i40iw_puda_buf *buf)
-{
- u16 fpdu_len = 0;
- u16 datalen = buf->datalen;
- u8 *datap = buf->data;
- u8 *crcptr;
- u16 ioffset = 0;
- u32 mpacrc;
- u32 seqnum = buf->seqnum;
- u16 length = 0;
- u16 full = 0;
- bool partial = false;
- struct i40iw_puda_buf *txbuf;
- struct list_head *rxlist = &pfpdu->rxlist;
- enum i40iw_status_code ret = 0;
- enum i40iw_status_code status = 0;
-
- ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
- while (datalen) {
- fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
- if (fpdu_len > pfpdu->max_fpdu_data) {
- i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
- "%s: error bad fpdu_len\n", __func__);
- status = I40IW_ERR_MPA_CRC;
- list_add(&buf->list, rxlist);
- return status;
- }
-
- if (datalen < fpdu_len) {
- partial = true;
- break;
- }
- crcptr = datap + fpdu_len - 4;
- mpacrc = *(u32 *)crcptr;
- if (ieq->check_crc)
- ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
- datap, fpdu_len - 4, mpacrc);
- if (ret) {
- status = I40IW_ERR_MPA_CRC;
- list_add(&buf->list, rxlist);
- return status;
- }
- full++;
- pfpdu->fpdu_processed++;
- datap += fpdu_len;
- length += fpdu_len;
- datalen -= fpdu_len;
- }
- if (full) {
- /* copy full fpdus into the txbuf and send them out */
- txbuf = i40iw_puda_get_bufpool(ieq);
- if (!txbuf) {
- pfpdu->no_tx_bufs++;
- status = I40IW_ERR_NO_TXBUFS;
- list_add(&buf->list, rxlist);
- return status;
- }
- /* modify txbuf's buffer header */
- i40iw_ieq_setup_tx_buf(buf, txbuf);
- /* copy full fpdus to the new buffer */
- i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
- length);
- txbuf->totallen = buf->hdrlen + length;
-
- i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
- i40iw_puda_send_buf(ieq, txbuf);
-
- if (!datalen) {
- pfpdu->rcv_nxt = buf->seqnum + length;
- i40iw_puda_ret_bufpool(ieq, buf);
- return status;
- }
- buf->data = datap;
- buf->seqnum = seqnum + length;
- buf->datalen = datalen;
- pfpdu->rcv_nxt = buf->seqnum;
- }
- if (partial)
- status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
-
- return status;
-}
-
-/**
- * i40iw_ieq_process_fpdus - process fpdu buffers on its list
- * @qp: qp for which partial fpdus are pending
- * @ieq: ieq resource
- */
-static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
- struct i40iw_puda_rsrc *ieq)
-{
- struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
- struct list_head *rxlist = &pfpdu->rxlist;
- struct i40iw_puda_buf *buf;
- enum i40iw_status_code status;
-
- do {
- if (list_empty(rxlist))
- break;
- buf = i40iw_puda_get_listbuf(rxlist);
- if (!buf) {
- i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
- "%s: error no buf\n", __func__);
- break;
- }
- if (buf->seqnum != pfpdu->rcv_nxt) {
- /* This could be an out-of-order or missing packet */
- pfpdu->out_of_order++;
- list_add(&buf->list, rxlist);
- break;
- }
- /* keep processing buffers from the head of the list */
- status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
- if (status == I40IW_ERR_MPA_CRC) {
- pfpdu->mpa_crc_err = true;
- while (!list_empty(rxlist)) {
- buf = i40iw_puda_get_listbuf(rxlist);
- i40iw_puda_ret_bufpool(ieq, buf);
- pfpdu->crc_err++;
- }
- /* create CQP for AE */
- i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
- }
- } while (!status);
-}
-
-/**
- * i40iw_ieq_handle_exception - handle qp's exception
- * @ieq: ieq resource
- * @qp: qp receiving exception
- * @buf: receive buffer
- */
-static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
- struct i40iw_sc_qp *qp,
- struct i40iw_puda_buf *buf)
-{
- struct i40iw_puda_buf *tmpbuf = NULL;
- struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
- u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
- u32 rcv_wnd = hw_host_ctx[23];
- /* first partial seq # in q2 */
- u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
- struct list_head *rxlist = &pfpdu->rxlist;
- struct list_head *plist;
-
- pfpdu->total_ieq_bufs++;
-
- if (pfpdu->mpa_crc_err) {
- pfpdu->crc_err++;
- goto error;
- }
- if (pfpdu->mode && (fps != pfpdu->fps)) {
- /* clean up qp as it is a new partial sequence */
- i40iw_ieq_cleanup_qp(ieq, qp);
- i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
- "%s: restarting new partial\n", __func__);
- pfpdu->mode = false;
- }
-
- if (!pfpdu->mode) {
- i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
- /* First_Partial_Sequence_Number check */
- pfpdu->rcv_nxt = fps;
- pfpdu->fps = fps;
- pfpdu->mode = true;
- pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
- (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
- pfpdu->pmode_count++;
- INIT_LIST_HEAD(rxlist);
- i40iw_ieq_check_first_buf(buf, fps);
- }
-
- if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
- pfpdu->bad_seq_num++;
- goto error;
- }
-
- if (!list_empty(rxlist)) {
- tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
- while ((struct list_head *)tmpbuf != rxlist) {
- if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
- break;
- plist = &tmpbuf->list;
- tmpbuf = (struct i40iw_puda_buf *)plist->next;
- }
- /* Insert buf before tmpbuf */
- list_add_tail(&buf->list, &tmpbuf->list);
- } else {
- list_add_tail(&buf->list, rxlist);
- }
- i40iw_ieq_process_fpdus(qp, ieq);
- return;
- error:
- i40iw_puda_ret_bufpool(ieq, buf);
-}
-
-/**
- * i40iw_ieq_receive - handle a received exception buffer
- * @vsi: pointer to vsi structure
- * @buf: exception buffer received
- */
-static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
- struct i40iw_puda_buf *buf)
-{
- struct i40iw_puda_rsrc *ieq = vsi->ieq;
- struct i40iw_sc_qp *qp = NULL;
- u32 wqe_idx = ieq->compl_rxwqe_idx;
-
- qp = i40iw_ieq_get_qp(vsi->dev, buf);
- if (!qp) {
- ieq->stats_bad_qp_id++;
- i40iw_puda_ret_bufpool(ieq, buf);
- } else {
- i40iw_ieq_handle_exception(ieq, qp, buf);
- }
- /*
- * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
- * to know at which wqe_idx to start replenishing the rq
- */
- if (!ieq->rxq_invalid_cnt)
- ieq->rx_wqe_idx = wqe_idx;
- ieq->rxq_invalid_cnt++;
-}
-
-/**
- * i40iw_ieq_tx_compl - return exception buffer to pool after send completes
- * @vsi: pointer to the vsi structure
- * @sqwrid: pointer to puda buffer
- */
-static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
-{
- struct i40iw_puda_rsrc *ieq = vsi->ieq;
- struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
-
- i40iw_puda_ret_bufpool(ieq, buf);
-}
-
-/**
- * i40iw_ieq_cleanup_qp - qp is being destroyed
- * @ieq: ieq resource
- * @qp: qp whose pending fpdu buffers are returned
- */
-void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
-{
- struct i40iw_puda_buf *buf;
- struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
- struct list_head *rxlist = &pfpdu->rxlist;
-
- if (!pfpdu->mode)
- return;
- while (!list_empty(rxlist)) {
- buf = i40iw_puda_get_listbuf(rxlist);
- i40iw_puda_ret_bufpool(ieq, buf);
- }
-}
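
A note on the sorted insert in i40iw_ieq_handle_exception() above: out-of-order exception buffers are queued on rxlist ordered by TCP sequence number, and the signed-difference test (int)(buf->seqnum - tmpbuf->seqnum) < 0 keeps that ordering correct across 32-bit sequence wraparound. Below is a minimal standalone sketch of the same idiom; it is plain C with hypothetical names (seq_buf, seq_insert), not the driver's structures.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for i40iw_puda_buf: only the fields the
 * ordering logic needs. */
struct seq_buf {
	uint32_t seqnum;
	struct seq_buf *next;
};

/* Insert buf into the list at head, sorted by sequence number. The
 * signed 32-bit difference behaves like the driver's
 * "(int)(buf->seqnum - tmpbuf->seqnum) < 0" test, so a post-wrap
 * sequence such as 0x00000005 still sorts after 0xFFFFFFF0. */
static void seq_insert(struct seq_buf **head, struct seq_buf *buf)
{
	while (*head && (int32_t)(buf->seqnum - (*head)->seqnum) >= 0)
		head = &(*head)->next;
	buf->next = *head;
	*head = buf;
}

int main(void)
{
	struct seq_buf bufs[] = {
		{ .seqnum = 0x00000005 },
		{ .seqnum = 0xFFFFFFF0 },
		{ .seqnum = 0xFFFFFFFA },
	};
	struct seq_buf *head = NULL;

	for (int i = 0; i < 3; i++)
		seq_insert(&head, &bufs[i]);
	for (struct seq_buf *b = head; b; b = b->next)
		printf("0x%08X\n", b->seqnum);
	return 0;
}

Compiled as C99 or later, the sketch prints the buffers in on-wire order: 0xFFFFFFF0, 0xFFFFFFFA, 0x00000005.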
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
deleted file mode 100644
index 53a7d58c84b5..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_PUDA_H
-#define I40IW_PUDA_H
-
-#define I40IW_IEQ_MPA_FRAMING 6
-
-struct i40iw_sc_dev;
-struct i40iw_sc_qp;
-struct i40iw_sc_cq;
-
-enum puda_resource_type {
- I40IW_PUDA_RSRC_TYPE_ILQ = 1,
- I40IW_PUDA_RSRC_TYPE_IEQ
-};
-
-enum puda_rsrc_complete {
- PUDA_CQ_CREATED = 1,
- PUDA_QP_CREATED,
- PUDA_TX_COMPLETE,
- PUDA_RX_COMPLETE,
- PUDA_HASH_CRC_COMPLETE
-};
-
-struct i40iw_puda_completion_info {
- struct i40iw_qp_uk *qp;
- u8 q_type;
- u8 vlan_valid;
- u8 l3proto;
- u8 l4proto;
- u16 payload_len;
- u32 compl_error; /* No_err=0, else major and minor err code */
- u32 qp_id;
- u32 wqe_idx;
-};
-
-struct i40iw_puda_send_info {
- u64 paddr; /* Physical address */
- u32 len;
- u8 tcplen;
- u8 maclen;
- bool ipv4;
- bool doloopback;
- void *scratch;
-};
-
-struct i40iw_puda_buf {
- struct list_head list; /* MUST be first entry */
- struct i40iw_dma_mem mem; /* DMA memory for the buffer */
- struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */
- struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */
- void *scratch;
- u8 *iph;
- u8 *tcph;
- u8 *data;
- u16 datalen;
- u16 vlan_id;
- u8 tcphlen; /* tcp length in bytes */
- u8 maclen; /* mac length in bytes */
- u32 totallen; /* maclen+iphlen+tcphlen+datalen */
- atomic_t refcount;
- u8 hdrlen;
- bool ipv4;
- u32 seqnum;
-};
-
-struct i40iw_puda_rsrc_info {
- enum puda_resource_type type; /* ILQ or IEQ */
- u32 count;
- u16 pd_id;
- u32 cq_id;
- u32 qp_id;
- u32 sq_size;
- u32 rq_size;
- u16 buf_size;
- u16 mss;
- u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
- void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
-};
-
-struct i40iw_puda_rsrc {
- struct i40iw_sc_cq cq;
- struct i40iw_sc_qp qp;
- struct i40iw_sc_pd sc_pd;
- struct i40iw_sc_dev *dev;
- struct i40iw_sc_vsi *vsi;
- struct i40iw_dma_mem cqmem;
- struct i40iw_dma_mem qpmem;
- struct i40iw_virt_mem ilq_mem;
- enum puda_rsrc_complete completion;
- enum puda_resource_type type;
- u16 buf_size; /* buffer must be max datalen + tcpip hdr + mac */
- u16 mss;
- u32 cq_id;
- u32 qp_id;
- u32 sq_size;
- u32 rq_size;
- u32 cq_size;
- struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
- u64 *rq_wrid_array;
- u32 compl_rxwqe_idx;
- u32 rx_wqe_idx;
- u32 rxq_invalid_cnt;
- u32 tx_wqe_avail_cnt;
- bool check_crc;
- struct shash_desc *hash_desc;
- struct list_head txpend;
- struct list_head bufpool; /* free buffers pool list for recv and xmit */
- u32 alloc_buf_count;
- u32 avail_buf_count; /* snapshot of currently available buffers */
- spinlock_t bufpool_lock;
- struct i40iw_puda_buf *alloclist;
- void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
- /* puda stats */
- u64 stats_buf_alloc_fail;
- u64 stats_pkt_rcvd;
- u64 stats_pkt_sent;
- u64 stats_rcvd_pkt_err;
- u64 stats_sent_pkt_q;
- u64 stats_bad_qp_id;
-};
-
-struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
-void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
- struct i40iw_puda_buf *buf);
-void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
- struct i40iw_puda_buf *buf);
-enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
- struct i40iw_puda_send_info *info);
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
- struct i40iw_puda_rsrc_info *info);
-void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
- enum puda_resource_type type,
- bool reset);
-enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
- struct i40iw_sc_cq *cq, u32 *compl_err);
-
-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
- struct i40iw_puda_buf *buf);
-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
- struct i40iw_puda_buf *buf);
-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
- void *addr, u32 length, u32 value);
-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
-void i40iw_free_hash_desc(struct shash_desc *desc);
-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
- u32 seqnum);
-enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
-enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
-void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
-void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
-void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
-#endif
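
The register header deleted next consists almost entirely of address/SHIFT/MASK triples. As a reference for how such triples are typically consumed, here is a minimal sketch of the usual mask-then-shift read and read-modify-write update; it is plain C, the accessor names are invented, and only the two macros are copied from the header below.

#include <stdio.h>
#include <stdint.h>

/* Example triple, copied from the deleted i40iw_register.h. */
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK \
	(0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)

/* Read a field: mask off the other bits, then shift it down. */
static uint32_t get_hmc_profile(uint32_t reg)
{
	return (reg & I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK) >>
	       I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT;
}

/* Update a field: clear it, then OR in the shifted new value,
 * re-masked so an oversized value cannot clobber neighbours. */
static uint32_t set_hmc_profile(uint32_t reg, uint32_t val)
{
	reg &= ~(uint32_t)I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK;
	return reg | ((val << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) &
		      I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_hmc_profile(reg, 5);	/* hypothetical field value */
	printf("profile = %u\n", get_hmc_profile(reg));	/* prints 5 */
	return 0;
}

The mask is applied before the shift on read and after the shift on write, matching how the MASK macros in the header are already pre-shifted.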
diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h
deleted file mode 100644
index 57768184e251..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_register.h
+++ /dev/null
@@ -1,1030 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_REGISTER_H
-#define I40IW_REGISTER_H
-
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF
-
-#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-
-#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-
-#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
-#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
-#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
-#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
-#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
-#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
-#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
-#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* I40IW_REGISTER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
deleted file mode 100644
index 36a19c4e5bba..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_STATUS_H
-#define I40IW_STATUS_H
-
-/* Error Codes */
-enum i40iw_status_code {
- I40IW_SUCCESS = 0,
- I40IW_ERR_NVM = -1,
- I40IW_ERR_NVM_CHECKSUM = -2,
- I40IW_ERR_CONFIG = -4,
- I40IW_ERR_PARAM = -5,
- I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
- I40IW_ERR_RESET_FAILED = -7,
- I40IW_ERR_SWFW_SYNC = -8,
- I40IW_ERR_NO_MEMORY = -9,
- I40IW_ERR_BAD_PTR = -10,
- I40IW_ERR_INVALID_PD_ID = -11,
- I40IW_ERR_INVALID_QP_ID = -12,
- I40IW_ERR_INVALID_CQ_ID = -13,
- I40IW_ERR_INVALID_CEQ_ID = -14,
- I40IW_ERR_INVALID_AEQ_ID = -15,
- I40IW_ERR_INVALID_SIZE = -16,
- I40IW_ERR_INVALID_ARP_INDEX = -17,
- I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
- I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
- I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
- I40IW_ERR_INVALID_FRAG_COUNT = -21,
- I40IW_ERR_QUEUE_EMPTY = -22,
- I40IW_ERR_INVALID_ALIGNMENT = -23,
- I40IW_ERR_FLUSHED_QUEUE = -24,
- I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
- I40IW_ERR_TIMEOUT = -27,
- I40IW_ERR_OPCODE_MISMATCH = -28,
- I40IW_ERR_CQP_COMPL_ERROR = -29,
- I40IW_ERR_INVALID_VF_ID = -30,
- I40IW_ERR_INVALID_HMCFN_ID = -31,
- I40IW_ERR_BACKING_PAGE_ERROR = -32,
- I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
- I40IW_ERR_INVALID_PBLE_INDEX = -34,
- I40IW_ERR_INVALID_SD_INDEX = -35,
- I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
- I40IW_ERR_INVALID_SD_TYPE = -37,
- I40IW_ERR_MEMCPY_FAILED = -38,
- I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
- I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
- I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
- I40IW_ERR_SRQ_ENABLED = -42,
- I40IW_ERR_BUF_TOO_SHORT = -43,
- I40IW_ERR_BAD_IWARP_CQE = -44,
- I40IW_ERR_NVM_BLANK_MODE = -45,
- I40IW_ERR_NOT_IMPLEMENTED = -46,
- I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
- I40IW_ERR_NOT_READY = -48,
- I40IW_NOT_SUPPORTED = -49,
- I40IW_ERR_FIRMWARE_API_VERSION = -50,
- I40IW_ERR_RING_FULL = -51,
- I40IW_ERR_MPA_CRC = -61,
- I40IW_ERR_NO_TXBUFS = -62,
- I40IW_ERR_SEQ_NUM = -63,
- I40IW_ERR_list_empty = -64,
- I40IW_ERR_INVALID_MAC_ADDR = -65,
- I40IW_ERR_BAD_STAG = -66,
- I40IW_ERR_CQ_COMPL_ERROR = -67,
- I40IW_ERR_QUEUE_DESTROYED = -68,
- I40IW_ERR_INVALID_FEAT_CNT = -69
-
-};
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
deleted file mode 100644
index 394e182686cf..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ /dev/null
@@ -1,1358 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_TYPE_H
-#define I40IW_TYPE_H
-#include "i40iw_user.h"
-#include "i40iw_hmc.h"
-#include "i40iw_vf.h"
-#include "i40iw_virtchnl.h"
-
-struct i40iw_cqp_sq_wqe {
- u64 buf[I40IW_CQP_WQE_SIZE];
-};
-
-struct i40iw_sc_aeqe {
- u64 buf[I40IW_AEQE_SIZE];
-};
-
-struct i40iw_ceqe {
- u64 buf[I40IW_CEQE_SIZE];
-};
-
-struct i40iw_cqp_ctx {
- u64 buf[I40IW_CQP_CTX_SIZE];
-};
-
-struct i40iw_cq_shadow_area {
- u64 buf[I40IW_SHADOW_AREA_SIZE];
-};
-
-struct i40iw_sc_dev;
-struct i40iw_hmc_info;
-struct i40iw_vsi_pestat;
-
-struct i40iw_cqp_ops;
-struct i40iw_ccq_ops;
-struct i40iw_ceq_ops;
-struct i40iw_aeq_ops;
-struct i40iw_mr_ops;
-struct i40iw_cqp_misc_ops;
-struct i40iw_pd_ops;
-struct i40iw_priv_qp_ops;
-struct i40iw_priv_cq_ops;
-struct i40iw_hmc_ops;
-struct pci_dev;
-
-enum i40iw_page_size {
- I40IW_PAGE_SIZE_4K,
- I40IW_PAGE_SIZE_2M
-};
-
-enum i40iw_resource_indicator_type {
- I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
- I40IW_RSRC_INDICATOR_TYPE_CQ,
- I40IW_RSRC_INDICATOR_TYPE_QP,
- I40IW_RSRC_INDICATOR_TYPE_SRQ
-};
-
-enum i40iw_hdrct_flags {
- DDP_LEN_FLAG = 0x80,
- DDP_HDR_FLAG = 0x40,
- RDMA_HDR_FLAG = 0x20
-};
-
-enum i40iw_term_layers {
- LAYER_RDMA = 0,
- LAYER_DDP = 1,
- LAYER_MPA = 2
-};
-
-enum i40iw_term_error_types {
- RDMAP_REMOTE_PROT = 1,
- RDMAP_REMOTE_OP = 2,
- DDP_CATASTROPHIC = 0,
- DDP_TAGGED_BUFFER = 1,
- DDP_UNTAGGED_BUFFER = 2,
- DDP_LLP = 3
-};
-
-enum i40iw_term_rdma_errors {
- RDMAP_INV_STAG = 0x00,
- RDMAP_INV_BOUNDS = 0x01,
- RDMAP_ACCESS = 0x02,
- RDMAP_UNASSOC_STAG = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_RDMAP_VER = 0x05,
- RDMAP_UNEXPECTED_OP = 0x06,
- RDMAP_CATASTROPHIC_LOCAL = 0x07,
- RDMAP_CATASTROPHIC_GLOBAL = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum i40iw_term_ddp_errors {
- DDP_CATASTROPHIC_LOCAL = 0x00,
- DDP_TAGGED_INV_STAG = 0x00,
- DDP_TAGGED_BOUNDS = 0x01,
- DDP_TAGGED_UNASSOC_STAG = 0x02,
- DDP_TAGGED_TO_WRAP = 0x03,
- DDP_TAGGED_INV_DDP_VER = 0x04,
- DDP_UNTAGGED_INV_QN = 0x01,
- DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
- DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
- DDP_UNTAGGED_INV_MO = 0x04,
- DDP_UNTAGGED_INV_TOO_LONG = 0x05,
- DDP_UNTAGGED_INV_DDP_VER = 0x06
-};
-
-enum i40iw_term_mpa_errors {
- MPA_CLOSED = 0x01,
- MPA_CRC = 0x02,
- MPA_MARKER = 0x03,
- MPA_REQ_RSP = 0x04,
-};
-
-enum i40iw_flush_opcode {
- FLUSH_INVALID = 0,
- FLUSH_PROT_ERR,
- FLUSH_REM_ACCESS_ERR,
- FLUSH_LOC_QP_OP_ERR,
- FLUSH_REM_OP_ERR,
- FLUSH_LOC_LEN_ERR,
- FLUSH_GENERAL_ERR,
- FLUSH_FATAL_ERR
-};
-
-enum i40iw_term_eventtypes {
- TERM_EVENT_QP_FATAL,
- TERM_EVENT_QP_ACCESS_ERR
-};
-
-struct i40iw_terminate_hdr {
- u8 layer_etype;
- u8 error_code;
- u8 hdrct;
- u8 rsvd;
-};
-
-enum i40iw_debug_flag {
- I40IW_DEBUG_NONE = 0x00000000,
- I40IW_DEBUG_ERR = 0x00000001,
- I40IW_DEBUG_INIT = 0x00000002,
- I40IW_DEBUG_DEV = 0x00000004,
- I40IW_DEBUG_CM = 0x00000008,
- I40IW_DEBUG_VERBS = 0x00000010,
- I40IW_DEBUG_PUDA = 0x00000020,
- I40IW_DEBUG_ILQ = 0x00000040,
- I40IW_DEBUG_IEQ = 0x00000080,
- I40IW_DEBUG_QP = 0x00000100,
- I40IW_DEBUG_CQ = 0x00000200,
- I40IW_DEBUG_MR = 0x00000400,
- I40IW_DEBUG_PBLE = 0x00000800,
- I40IW_DEBUG_WQE = 0x00001000,
- I40IW_DEBUG_AEQ = 0x00002000,
- I40IW_DEBUG_CQP = 0x00004000,
- I40IW_DEBUG_HMC = 0x00008000,
- I40IW_DEBUG_USER = 0x00010000,
- I40IW_DEBUG_VIRT = 0x00020000,
- I40IW_DEBUG_DCB = 0x00040000,
- I40IW_DEBUG_CQE = 0x00800000,
- I40IW_DEBUG_ALL = 0xFFFFFFFF
-};
-
-enum i40iw_hw_stats_index_32b {
- I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
- I40IW_HW_STAT_INDEX_IP4RXTRUNC,
- I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
- I40IW_HW_STAT_INDEX_IP6RXDISCARD,
- I40IW_HW_STAT_INDEX_IP6RXTRUNC,
- I40IW_HW_STAT_INDEX_IP6TXNOROUTE,
- I40IW_HW_STAT_INDEX_TCPRTXSEG,
- I40IW_HW_STAT_INDEX_TCPRXOPTERR,
- I40IW_HW_STAT_INDEX_TCPRXPROTOERR,
- I40IW_HW_STAT_INDEX_MAX_32
-};
-
-enum i40iw_hw_stats_index_64b {
- I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
- I40IW_HW_STAT_INDEX_IP4RXPKTS,
- I40IW_HW_STAT_INDEX_IP4RXFRAGS,
- I40IW_HW_STAT_INDEX_IP4RXMCPKTS,
- I40IW_HW_STAT_INDEX_IP4TXOCTS,
- I40IW_HW_STAT_INDEX_IP4TXPKTS,
- I40IW_HW_STAT_INDEX_IP4TXFRAGS,
- I40IW_HW_STAT_INDEX_IP4TXMCPKTS,
- I40IW_HW_STAT_INDEX_IP6RXOCTS,
- I40IW_HW_STAT_INDEX_IP6RXPKTS,
- I40IW_HW_STAT_INDEX_IP6RXFRAGS,
- I40IW_HW_STAT_INDEX_IP6RXMCPKTS,
- I40IW_HW_STAT_INDEX_IP6TXOCTS,
- I40IW_HW_STAT_INDEX_IP6TXPKTS,
- I40IW_HW_STAT_INDEX_IP6TXFRAGS,
- I40IW_HW_STAT_INDEX_IP6TXMCPKTS,
- I40IW_HW_STAT_INDEX_TCPRXSEGS,
- I40IW_HW_STAT_INDEX_TCPTXSEG,
- I40IW_HW_STAT_INDEX_RDMARXRDS,
- I40IW_HW_STAT_INDEX_RDMARXSNDS,
- I40IW_HW_STAT_INDEX_RDMARXWRS,
- I40IW_HW_STAT_INDEX_RDMATXRDS,
- I40IW_HW_STAT_INDEX_RDMATXSNDS,
- I40IW_HW_STAT_INDEX_RDMATXWRS,
- I40IW_HW_STAT_INDEX_RDMAVBND,
- I40IW_HW_STAT_INDEX_RDMAVINV,
- I40IW_HW_STAT_INDEX_MAX_64
-};
-
-enum i40iw_feature_type {
- I40IW_FEATURE_FW_INFO = 0,
- I40IW_MAX_FEATURES
-};
-
-struct i40iw_dev_hw_stats_offsets {
- u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
- u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
-};
-
-struct i40iw_dev_hw_stats {
- u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
- u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
-};
-
-struct i40iw_vsi_pestat {
- struct i40iw_hw *hw;
- struct i40iw_dev_hw_stats hw_stats;
- struct i40iw_dev_hw_stats last_read_hw_stats;
- struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
- struct timer_list stats_timer;
- struct i40iw_sc_vsi *vsi;
- spinlock_t lock; /* rdma stats lock */
-};
-
-struct i40iw_hw {
- u8 __iomem *hw_addr;
- struct pci_dev *pcidev;
- struct i40iw_hmc_info hmc;
-};
-
-struct i40iw_pfpdu {
- struct list_head rxlist;
- u32 rcv_nxt;
- u32 fps;
- u32 max_fpdu_data;
- bool mode;
- bool mpa_crc_err;
- u64 total_ieq_bufs;
- u64 fpdu_processed;
- u64 bad_seq_num;
- u64 crc_err;
- u64 no_tx_bufs;
- u64 tx_err;
- u64 out_of_order;
- u64 pmode_count;
-};
-
-struct i40iw_sc_pd {
- u32 size;
- struct i40iw_sc_dev *dev;
- u16 pd_id;
- int abi_ver;
-};
-
-struct i40iw_cqp_quanta {
- u64 elem[I40IW_CQP_WQE_SIZE];
-};
-
-struct i40iw_sc_cqp {
- u32 size;
- u64 sq_pa;
- u64 host_ctx_pa;
- void *back_cqp;
- struct i40iw_sc_dev *dev;
- enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *,
- struct i40iw_update_sds_info *);
- struct i40iw_dma_mem sdbuf;
- struct i40iw_ring sq_ring;
- struct i40iw_cqp_quanta *sq_base;
- u64 *host_ctx;
- u64 *scratch_array;
- u32 cqp_id;
- u32 sq_size;
- u32 hw_sq_size;
- u8 struct_ver;
- u8 polarity;
- bool en_datacenter_tcp;
- u8 hmc_profile;
- u8 enabled_vf_count;
- u8 timeout_count;
-};
-
-struct i40iw_sc_aeq {
- u32 size;
- u64 aeq_elem_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_sc_aeqe *aeqe_base;
- void *pbl_list;
- u32 elem_cnt;
- struct i40iw_ring aeq_ring;
- bool virtual_map;
- u8 pbl_chunk_size;
- u32 first_pm_pbl_idx;
- u8 polarity;
-};
-
-struct i40iw_sc_ceq {
- u32 size;
- u64 ceq_elem_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_ceqe *ceqe_base;
- void *pbl_list;
- u32 ceq_id;
- u32 elem_cnt;
- struct i40iw_ring ceq_ring;
- bool virtual_map;
- u8 pbl_chunk_size;
- bool tph_en;
- u8 tph_val;
- u32 first_pm_pbl_idx;
- u8 polarity;
-};
-
-struct i40iw_sc_cq {
- struct i40iw_cq_uk cq_uk;
- u64 cq_pa;
- u64 shadow_area_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_sc_vsi *vsi;
- void *pbl_list;
- void *back_cq;
- u32 ceq_id;
- u32 shadow_read_threshold;
- bool ceqe_mask;
- bool virtual_map;
- u8 pbl_chunk_size;
- u8 cq_type;
- bool ceq_id_valid;
- bool tph_en;
- u8 tph_val;
- u32 first_pm_pbl_idx;
- bool check_overflow;
-};
-
-struct i40iw_sc_qp {
- struct i40iw_qp_uk qp_uk;
- u64 sq_pa;
- u64 rq_pa;
- u64 hw_host_ctx_pa;
- u64 shadow_area_pa;
- u64 q2_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_sc_vsi *vsi;
- struct i40iw_sc_pd *pd;
- u64 *hw_host_ctx;
- void *llp_stream_handle;
- void *back_qp;
- struct i40iw_pfpdu pfpdu;
- u8 *q2_buf;
- u64 qp_compl_ctx;
- u16 qs_handle;
- u8 sq_tph_val;
- u8 rq_tph_val;
- u8 qp_state;
- u8 qp_type;
- u8 hw_sq_size;
- u8 hw_rq_size;
- u8 src_mac_addr_idx;
- bool sq_tph_en;
- bool rq_tph_en;
- bool rcv_tph_en;
- bool xmit_tph_en;
- bool virtual_map;
- bool flush_sq;
- bool flush_rq;
- u8 user_pri;
- struct list_head list;
- bool on_qoslist;
- bool sq_flush;
- enum i40iw_flush_opcode flush_code;
- enum i40iw_term_eventtypes eventtype;
- u8 term_flags;
-};
-
-struct i40iw_hmc_fpm_misc {
- u32 max_ceqs;
- u32 max_sds;
- u32 xf_block_size;
- u32 q1_block_size;
- u32 ht_multiplier;
- u32 timer_bucket;
-};
-
-struct i40iw_vchnl_if {
- enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16);
- enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16);
-};
-
-#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512
-
-struct i40iw_vchnl_vf_msg_buffer {
- struct i40iw_virtchnl_op_buf vchnl_msg;
- char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
-};
-
-struct i40iw_qos {
- struct list_head qplist;
- spinlock_t lock; /* qos list */
- u16 qs_handle;
-};
-
-struct i40iw_vfdev {
- struct i40iw_sc_dev *pf_dev;
- u8 *hmc_info_mem;
- struct i40iw_vsi_pestat pestat;
- struct i40iw_hmc_pble_info *pble_info;
- struct i40iw_hmc_info hmc_info;
- struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
- u64 fpm_query_buf_pa;
- u64 *fpm_query_buf;
- u32 vf_id;
- u32 msg_count;
- bool pf_hmc_initialized;
- u16 pmf_index;
- u16 iw_vf_idx; /* VF Device table index */
- bool stats_initialized;
-};
-
-#define I40IW_INVALID_FCN_ID 0xff
-struct i40iw_sc_vsi {
- struct i40iw_sc_dev *dev;
- void *back_vsi; /* Owned by OS */
- u32 ilq_count;
- struct i40iw_virt_mem ilq_mem;
- struct i40iw_puda_rsrc *ilq;
- u32 ieq_count;
- struct i40iw_virt_mem ieq_mem;
- struct i40iw_puda_rsrc *ieq;
- u16 exception_lan_queue;
- u16 mtu;
- u8 fcn_id;
- bool stats_fcn_id_alloc;
- struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
- struct i40iw_vsi_pestat *pestat;
-};
-
-struct i40iw_sc_dev {
- struct list_head cqp_cmd_head; /* head of the CQP command list */
- spinlock_t cqp_lock; /* cqp list sync */
- struct i40iw_dev_uk dev_uk;
- bool fcn_id_array[I40IW_MAX_STATS_COUNT];
- struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
- u64 fpm_query_buf_pa;
- u64 fpm_commit_buf_pa;
- u64 *fpm_query_buf;
- u64 *fpm_commit_buf;
- void *back_dev;
- struct i40iw_hw *hw;
- u8 __iomem *db_addr;
- struct i40iw_hmc_info *hmc_info;
- struct i40iw_hmc_pble_info *pble_info;
- struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT];
- struct i40iw_sc_cqp *cqp;
- struct i40iw_sc_aeq *aeq;
- struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];
- struct i40iw_sc_cq *ccq;
- const struct i40iw_cqp_ops *cqp_ops;
- const struct i40iw_ccq_ops *ccq_ops;
- const struct i40iw_ceq_ops *ceq_ops;
- const struct i40iw_aeq_ops *aeq_ops;
- const struct i40iw_pd_ops *iw_pd_ops;
- const struct i40iw_priv_qp_ops *iw_priv_qp_ops;
- const struct i40iw_priv_cq_ops *iw_priv_cq_ops;
- const struct i40iw_mr_ops *mr_ops;
- const struct i40iw_cqp_misc_ops *cqp_misc_ops;
- const struct i40iw_hmc_ops *hmc_ops;
- struct i40iw_vchnl_if vchnl_if;
- const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
-
- struct i40iw_hmc_fpm_misc hmc_fpm_misc;
- u64 feature_info[I40IW_MAX_FEATURES];
- u32 debug_mask;
- u8 hmc_fn_id;
- bool is_pf;
- bool vchnl_up;
- bool ceq_valid;
- u8 vf_id;
- wait_queue_head_t vf_reqs;
- u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
- struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;
- u8 hw_rev;
-};
-
-struct i40iw_modify_cq_info {
- u64 cq_pa;
- struct i40iw_cqe *cq_base;
- void *pbl_list;
- u32 ceq_id;
- u32 cq_size;
- u32 shadow_read_threshold;
- bool virtual_map;
- u8 pbl_chunk_size;
- bool check_overflow;
- bool cq_resize;
- bool ceq_change;
- bool check_overflow_change;
- u32 first_pm_pbl_idx;
- bool ceq_valid;
-};
-
-struct i40iw_create_qp_info {
- u8 next_iwarp_state;
- bool ord_valid;
- bool tcp_ctx_valid;
- bool cq_num_valid;
- bool arp_cache_idx_valid;
-};
-
-struct i40iw_modify_qp_info {
- u64 rx_win0;
- u64 rx_win1;
- u8 next_iwarp_state;
- u8 termlen;
- bool ord_valid;
- bool tcp_ctx_valid;
- bool cq_num_valid;
- bool arp_cache_idx_valid;
- bool reset_tcp_conn;
- bool remove_hash_idx;
- bool dont_send_term;
- bool dont_send_fin;
- bool cached_var_valid;
- bool force_loopback;
-};
-
-struct i40iw_ccq_cqe_info {
- struct i40iw_sc_cqp *cqp;
- u64 scratch;
- u32 op_ret_val;
- u16 maj_err_code;
- u16 min_err_code;
- u8 op_code;
- bool error;
-};
-
-struct i40iw_l2params {
- u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
- u16 mtu;
-};
-
-struct i40iw_vsi_init_info {
- struct i40iw_sc_dev *dev;
- void *back_vsi;
- struct i40iw_l2params *params;
- u16 exception_lan_queue;
-};
-
-struct i40iw_vsi_stats_info {
- struct i40iw_vsi_pestat *pestat;
- u8 fcn_id;
- bool alloc_fcn_id;
- bool stats_initialize;
-};
-
-struct i40iw_device_init_info {
- u64 fpm_query_buf_pa;
- u64 fpm_commit_buf_pa;
- u64 *fpm_query_buf;
- u64 *fpm_commit_buf;
- struct i40iw_hw *hw;
- void __iomem *bar0;
- enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u8 hmc_fn_id;
- bool is_pf;
- u32 debug_mask;
-};
-
-enum i40iw_cqp_hmc_profile {
- I40IW_HMC_PROFILE_DEFAULT = 1,
- I40IW_HMC_PROFILE_FAVOR_VF = 2,
- I40IW_HMC_PROFILE_EQUAL = 3,
-};
-
-struct i40iw_cqp_init_info {
- u64 cqp_compl_ctx;
- u64 host_ctx_pa;
- u64 sq_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_cqp_quanta *sq;
- u64 *host_ctx;
- u64 *scratch_array;
- u32 sq_size;
- u8 struct_ver;
- bool en_datacenter_tcp;
- u8 hmc_profile;
- u8 enabled_vf_count;
-};
-
-struct i40iw_ceq_init_info {
- u64 ceqe_pa;
- struct i40iw_sc_dev *dev;
- u64 *ceqe_base;
- void *pbl_list;
- u32 elem_cnt;
- u32 ceq_id;
- bool virtual_map;
- u8 pbl_chunk_size;
- bool tph_en;
- u8 tph_val;
- u32 first_pm_pbl_idx;
-};
-
-struct i40iw_aeq_init_info {
- u64 aeq_elem_pa;
- struct i40iw_sc_dev *dev;
- u32 *aeqe_base;
- void *pbl_list;
- u32 elem_cnt;
- bool virtual_map;
- u8 pbl_chunk_size;
- u32 first_pm_pbl_idx;
-};
-
-struct i40iw_ccq_init_info {
- u64 cq_pa;
- u64 shadow_area_pa;
- struct i40iw_sc_dev *dev;
- struct i40iw_cqe *cq_base;
- u64 *shadow_area;
- void *pbl_list;
- u32 num_elem;
- u32 ceq_id;
- u32 shadow_read_threshold;
- bool ceqe_mask;
- bool ceq_id_valid;
- bool tph_en;
- u8 tph_val;
- bool avoid_mem_cflct;
- bool virtual_map;
- u8 pbl_chunk_size;
- u32 first_pm_pbl_idx;
-};
-
-struct i40iwarp_offload_info {
- u16 rcv_mark_offset;
- u16 snd_mark_offset;
- u16 pd_id;
- u8 ddp_ver;
- u8 rdmap_ver;
- u8 ord_size;
- u8 ird_size;
- bool wr_rdresp_en;
- bool rd_enable;
- bool snd_mark_en;
- bool rcv_mark_en;
- bool bind_en;
- bool fast_reg_en;
- bool priv_mode_en;
- bool lsmm_present;
- u8 iwarp_mode;
- bool align_hdrs;
- bool rcv_no_mpa_crc;
-
- u8 last_byte_sent;
-};
-
-struct i40iw_tcp_offload_info {
- bool ipv4;
- bool no_nagle;
- bool insert_vlan_tag;
- bool time_stamp;
- u8 cwnd_inc_limit;
- bool drop_ooo_seg;
- u8 dup_ack_thresh;
- u8 ttl;
- u8 src_mac_addr_idx;
- bool avoid_stretch_ack;
- u8 tos;
- u16 src_port;
- u16 dst_port;
- u32 dest_ip_addr0;
- u32 dest_ip_addr1;
- u32 dest_ip_addr2;
- u32 dest_ip_addr3;
- u32 snd_mss;
- u16 vlan_tag;
- u16 arp_idx;
- u32 flow_label;
- bool wscale;
- u8 tcp_state;
- u8 snd_wscale;
- u8 rcv_wscale;
- u32 time_stamp_recent;
- u32 time_stamp_age;
- u32 snd_nxt;
- u32 snd_wnd;
- u32 rcv_nxt;
- u32 rcv_wnd;
- u32 snd_max;
- u32 snd_una;
- u32 srtt;
- u32 rtt_var;
- u32 ss_thresh;
- u32 cwnd;
- u32 snd_wl1;
- u32 snd_wl2;
- u32 max_snd_window;
- u8 rexmit_thresh;
- u32 local_ipaddr0;
- u32 local_ipaddr1;
- u32 local_ipaddr2;
- u32 local_ipaddr3;
- bool ignore_tcp_opt;
- bool ignore_tcp_uns_opt;
-};
-
-struct i40iw_qp_host_ctx_info {
- u64 qp_compl_ctx;
- struct i40iw_tcp_offload_info *tcp_info;
- struct i40iwarp_offload_info *iwarp_info;
- u32 send_cq_num;
- u32 rcv_cq_num;
- bool tcp_info_valid;
- bool iwarp_info_valid;
- bool err_rq_idx_valid;
- u16 err_rq_idx;
- bool add_to_qoslist;
- u8 user_pri;
-};
-
-struct i40iw_aeqe_info {
- u64 compl_ctx;
- u32 qp_cq_id;
- u16 ae_id;
- u16 wqe_idx;
- u8 tcp_state;
- u8 iwarp_state;
- bool qp;
- bool cq;
- bool sq;
- bool in_rdrsp_wr;
- bool out_rdrsp;
- u8 q2_data_written;
- bool aeqe_overflow;
-};
-
-struct i40iw_allocate_stag_info {
- u64 total_len;
- u32 chunk_size;
- u32 stag_idx;
- u32 page_size;
- u16 pd_id;
- u16 access_rights;
- bool remote_access;
- bool use_hmc_fcn_index;
- u8 hmc_fcn_index;
- bool use_pf_rid;
-};
-
-struct i40iw_reg_ns_stag_info {
- u64 reg_addr_pa;
- u64 fbo;
- void *va;
- u64 total_len;
- u32 page_size;
- u32 chunk_size;
- u32 first_pm_pbl_index;
- enum i40iw_addressing_type addr_type;
- i40iw_stag_index stag_idx;
- u16 access_rights;
- u16 pd_id;
- i40iw_stag_key stag_key;
- bool use_hmc_fcn_index;
- u8 hmc_fcn_index;
- bool use_pf_rid;
-};
-
-struct i40iw_fast_reg_stag_info {
- u64 wr_id;
- u64 reg_addr_pa;
- u64 fbo;
- void *va;
- u64 total_len;
- u32 page_size;
- u32 chunk_size;
- u32 first_pm_pbl_index;
- enum i40iw_addressing_type addr_type;
- i40iw_stag_index stag_idx;
- u16 access_rights;
- u16 pd_id;
- i40iw_stag_key stag_key;
- bool local_fence;
- bool read_fence;
- bool signaled;
- bool use_hmc_fcn_index;
- u8 hmc_fcn_index;
- bool use_pf_rid;
- bool defer_flag;
-};
-
-struct i40iw_dealloc_stag_info {
- u32 stag_idx;
- u16 pd_id;
- bool mr;
- bool dealloc_pbl;
-};
-
-struct i40iw_register_shared_stag {
- void *va;
- enum i40iw_addressing_type addr_type;
- i40iw_stag_index new_stag_idx;
- i40iw_stag_index parent_stag_idx;
- u32 access_rights;
- u16 pd_id;
- i40iw_stag_key new_stag_key;
-};
-
-struct i40iw_qp_init_info {
- struct i40iw_qp_uk_init_info qp_uk_init_info;
- struct i40iw_sc_pd *pd;
- struct i40iw_sc_vsi *vsi;
- u64 *host_ctx;
- u8 *q2;
- u64 sq_pa;
- u64 rq_pa;
- u64 host_ctx_pa;
- u64 q2_pa;
- u64 shadow_area_pa;
- int abi_ver;
- u8 sq_tph_val;
- u8 rq_tph_val;
- u8 type;
- bool sq_tph_en;
- bool rq_tph_en;
- bool rcv_tph_en;
- bool xmit_tph_en;
- bool virtual_map;
-};
-
-struct i40iw_cq_init_info {
- struct i40iw_sc_dev *dev;
- u64 cq_base_pa;
- u64 shadow_area_pa;
- u32 ceq_id;
- u32 shadow_read_threshold;
- bool virtual_map;
- bool ceqe_mask;
- u8 pbl_chunk_size;
- u32 first_pm_pbl_idx;
- bool ceq_id_valid;
- bool tph_en;
- u8 tph_val;
- u8 type;
- struct i40iw_cq_uk_init_info cq_uk_init_info;
-};
-
-struct i40iw_upload_context_info {
- u64 buf_pa;
- bool freeze_qp;
- bool raw_format;
- u32 qp_id;
- u8 qp_type;
-};
-
-struct i40iw_add_arp_cache_entry_info {
- u8 mac_addr[6];
- u32 reach_max;
- u16 arp_index;
- bool permanent;
-};
-
-struct i40iw_apbvt_info {
- u16 port;
- bool add;
-};
-
-enum i40iw_quad_entry_type {
- I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1,
- I40IW_QHASH_TYPE_TCP_SYN,
-};
-
-enum i40iw_quad_hash_manage_type {
- I40IW_QHASH_MANAGE_TYPE_DELETE = 0,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- I40IW_QHASH_MANAGE_TYPE_MODIFY
-};
-
-struct i40iw_qhash_table_info {
- struct i40iw_sc_vsi *vsi;
- enum i40iw_quad_hash_manage_type manage;
- enum i40iw_quad_entry_type entry_type;
- bool vlan_valid;
- bool ipv4_valid;
- u8 mac_addr[6];
- u16 vlan_id;
- u8 user_pri;
- u32 qp_num;
- u32 dest_ip[4];
- u32 src_ip[4];
- u16 dest_port;
- u16 src_port;
-};
-
-struct i40iw_local_mac_ipaddr_entry_info {
- u8 mac_addr[6];
- u8 entry_idx;
-};
-
-struct i40iw_qp_flush_info {
- u16 sq_minor_code;
- u16 sq_major_code;
- u16 rq_minor_code;
- u16 rq_major_code;
- u16 ae_code;
- u8 ae_source;
- bool sq;
- bool rq;
- bool userflushcode;
- bool generate_ae;
-};
-
-struct i40iw_cqp_commit_fpm_values {
- u64 qp_base;
- u64 cq_base;
- u32 hte_base;
- u32 arp_base;
- u32 apbvt_inuse_base;
- u32 mr_base;
- u32 xf_base;
- u32 xffl_base;
- u32 q1_base;
- u32 q1fl_base;
- u32 fsimc_base;
- u32 fsiav_base;
- u32 pbl_base;
-
- u32 qp_cnt;
- u32 cq_cnt;
- u32 hte_cnt;
- u32 arp_cnt;
- u32 mr_cnt;
- u32 xf_cnt;
- u32 xffl_cnt;
- u32 q1_cnt;
- u32 q1fl_cnt;
- u32 fsimc_cnt;
- u32 fsiav_cnt;
- u32 pbl_cnt;
-};
-
-struct i40iw_cqp_query_fpm_values {
- u16 first_pe_sd_index;
- u32 qp_objsize;
- u32 cq_objsize;
- u32 hte_objsize;
- u32 arp_objsize;
- u32 mr_objsize;
- u32 xf_objsize;
- u32 q1_objsize;
- u32 fsimc_objsize;
- u32 fsiav_objsize;
-
- u32 qp_max;
- u32 cq_max;
- u32 hte_max;
- u32 arp_max;
- u32 mr_max;
- u32 xf_max;
- u32 xffl_max;
- u32 q1_max;
- u32 q1fl_max;
- u32 fsimc_max;
- u32 fsiav_max;
- u32 pbl_max;
-};
-
-struct i40iw_gen_ae_info {
- u16 ae_code;
- u8 ae_source;
-};
-
-struct i40iw_cqp_ops {
- enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
- struct i40iw_cqp_init_info *);
- enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
- void (*cqp_post_sq)(struct i40iw_sc_cqp *);
- u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
- enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
- enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8,
- struct i40iw_ccq_cqe_info *);
-};
-
-struct i40iw_ccq_ops {
- enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *,
- struct i40iw_ccq_init_info *);
- enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool);
- enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool);
- enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *);
- enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *,
- struct i40iw_ccq_cqe_info *);
- void (*ccq_arm)(struct i40iw_sc_cq *);
-};
-
-struct i40iw_ceq_ops {
- enum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *,
- struct i40iw_ceq_init_info *);
- enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool);
- enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *);
- enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *);
- enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64);
- enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool);
- void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *);
-};
-
-struct i40iw_aeq_ops {
- enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *,
- struct i40iw_aeq_init_info *);
- enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool);
- enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool);
- enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *,
- struct i40iw_aeqe_info *);
- enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32);
- enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *);
- enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *);
-};
-
-struct i40iw_pd_ops {
- void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
-};
-
-struct i40iw_priv_qp_ops {
- enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *);
- enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *,
- struct i40iw_create_qp_info *, u64, bool);
- enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *,
- struct i40iw_modify_qp_info *, u64, bool);
- enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool);
- enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *,
- struct i40iw_qp_flush_info *, u64, bool);
- enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *,
- struct i40iw_upload_context_info *,
- u64, bool);
- enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *,
- struct i40iw_qp_host_ctx_info *);
-
- void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag);
- void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);
- void (*qp_send_rtt)(struct i40iw_sc_qp *, bool);
- enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);
- enum i40iw_status_code (*iw_mr_fast_register)(struct i40iw_sc_qp *,
- struct i40iw_fast_reg_stag_info *,
- bool);
-};
-
-struct i40iw_priv_cq_ops {
- enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *);
- enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool);
- enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool);
- enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *,
- struct i40iw_modify_cq_info *, u64, bool);
-};
-
-struct i40iw_mr_ops {
- enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *,
- struct i40iw_allocate_stag_info *, u64, bool);
- enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *,
- struct i40iw_reg_ns_stag_info *,
- u64, bool);
- enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *,
- struct i40iw_register_shared_stag *,
- u64, bool);
- enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *,
- struct i40iw_dealloc_stag_info *,
- u64, bool);
- enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool);
- enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool);
-};
-
-struct i40iw_cqp_misc_ops {
- enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,
- u64, u8, bool, bool);
- enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *,
- u64, u8, u8, bool, bool);
- enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
- struct i40iw_dma_mem *, bool, u8);
- enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
- struct i40iw_dma_mem *, bool, u8);
- enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *,
- u64, u8, bool, bool);
- enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *,
- struct i40iw_add_arp_cache_entry_info *,
- u64, bool);
- enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
- enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
- enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *,
- struct i40iw_apbvt_info *, u64, bool);
- enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *,
- struct i40iw_qhash_table_info *, u64, bool);
- enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool);
- enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *,
- struct i40iw_local_mac_ipaddr_entry_info *,
- u64, bool);
- enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool);
- enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool);
-	enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp *);
- enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *);
- enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *);
- enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
- enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
-};
-
-struct i40iw_hmc_ops {
- enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8);
- enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,
- struct i40iw_hmc_fpm_misc *);
- enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);
- enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *, u32 *sd);
- enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_create_obj_info *);
- enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_del_obj_info *,
- bool reset);
- enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *);
- enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *);
-};
-
-struct cqp_info {
- union {
- struct {
- struct i40iw_sc_qp *qp;
- struct i40iw_create_qp_info info;
- u64 scratch;
- } qp_create;
-
- struct {
- struct i40iw_sc_qp *qp;
- struct i40iw_modify_qp_info info;
- u64 scratch;
- } qp_modify;
-
- struct {
- struct i40iw_sc_qp *qp;
- u64 scratch;
- bool remove_hash_idx;
- bool ignore_mw_bnd;
- } qp_destroy;
-
- struct {
- struct i40iw_sc_cq *cq;
- u64 scratch;
- bool check_overflow;
- } cq_create;
-
- struct {
- struct i40iw_sc_cq *cq;
- u64 scratch;
- } cq_destroy;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_allocate_stag_info info;
- u64 scratch;
- } alloc_stag;
-
- struct {
- struct i40iw_sc_dev *dev;
- u64 scratch;
- u32 mw_stag_index;
- u16 pd_id;
- } mw_alloc;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_reg_ns_stag_info info;
- u64 scratch;
- } mr_reg_non_shared;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_dealloc_stag_info info;
- u64 scratch;
- } dealloc_stag;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_local_mac_ipaddr_entry_info info;
- u64 scratch;
- } add_local_mac_ipaddr_entry;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_add_arp_cache_entry_info info;
- u64 scratch;
- } add_arp_cache_entry;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- u64 scratch;
- u8 entry_idx;
- u8 ignore_ref_count;
- } del_local_mac_ipaddr_entry;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- u64 scratch;
- u16 arp_index;
- } del_arp_cache_entry;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_manage_vf_pble_info info;
- u64 scratch;
- } manage_vf_pble_bp;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_upload_context_info info;
- u64 scratch;
- } qp_upload_context;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- u64 scratch;
- } alloc_local_mac_ipaddr_entry;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_hmc_fcn_info info;
- u64 scratch;
- } manage_hmc_pm;
-
- struct {
- struct i40iw_sc_ceq *ceq;
- u64 scratch;
- } ceq_create;
-
- struct {
- struct i40iw_sc_ceq *ceq;
- u64 scratch;
- } ceq_destroy;
-
- struct {
- struct i40iw_sc_aeq *aeq;
- u64 scratch;
- } aeq_create;
-
- struct {
- struct i40iw_sc_aeq *aeq;
- u64 scratch;
- } aeq_destroy;
-
- struct {
- struct i40iw_sc_qp *qp;
- struct i40iw_qp_flush_info info;
- u64 scratch;
- } qp_flush_wqes;
-
- struct {
- struct i40iw_sc_qp *qp;
- struct i40iw_gen_ae_info info;
- u64 scratch;
- } gen_ae;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- void *fpm_values_va;
- u64 fpm_values_pa;
- u8 hmc_fn_id;
- u64 scratch;
- } query_fpm_values;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- void *fpm_values_va;
- u64 fpm_values_pa;
- u8 hmc_fn_id;
- u64 scratch;
- } commit_fpm_values;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_apbvt_info info;
- u64 scratch;
- } manage_apbvt_entry;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_qhash_table_info info;
- u64 scratch;
- } manage_qhash_table_entry;
-
- struct {
- struct i40iw_sc_dev *dev;
- struct i40iw_update_sds_info info;
- u64 scratch;
- } update_pe_sds;
-
- struct {
- struct i40iw_sc_cqp *cqp;
- struct i40iw_sc_qp *qp;
- u64 scratch;
- } suspend_resume;
- struct {
- struct i40iw_sc_cqp *cqp;
- void *cap_va;
- u64 cap_pa;
- u64 scratch;
- } query_rdma_features;
- } u;
-};
-
-struct cqp_commands_info {
- struct list_head cqp_cmd_entry;
- u8 cqp_cmd;
- u8 post_sq;
- struct cqp_info in;
-};
-
-struct i40iw_virtchnl_work_info {
- void (*callback_fcn)(void *vf_dev);
- void *worker_vf_dev;
-};
-
-struct i40iw_cqp_timeout {
- u64 compl_cqp_cmds;
- u8 count;
-};
-
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
deleted file mode 100644
index f521be16bf31..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ /dev/null
@@ -1,1200 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_status.h"
-#include "i40iw_d.h"
-#include "i40iw_user.h"
-#include "i40iw_register.h"
-
-static u32 nop_signature = 0x55550000;
-
-/**
- * i40iw_nop_1 - insert a nop wqe and move head; does not ring the doorbell
- * @qp: hw qp ptr
- */
-static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
-{
- u64 header, *wqe;
- u64 *wqe_0 = NULL;
- u32 wqe_idx, peek_head;
- bool signaled = false;
-
- if (!qp->sq_ring.head)
- return I40IW_ERR_PARAM;
-
- wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- wqe = qp->sq_base[wqe_idx].elem;
-
- qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;
-
- peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
- wqe_0 = qp->sq_base[peek_head].elem;
- if (peek_head)
- wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
- else
- wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;
-
- wmb(); /* Memory barrier to ensure data is written before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
- return 0;
-}
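-
-/*
- * A note on the valid-bit scheme, as implemented above: the VALID bit in
- * each WQE header carries the ring's current polarity, and swqe_polarity
- * flips whenever the head wraps to index 0, so hardware can tell freshly
- * posted WQEs from stale ones left over from the previous pass around the
- * ring. i40iw_nop_1() also pre-writes the valid bit of the following slot
- * (wqe_0) with the value that will read as invalid, so hardware stops there.
- */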
-
-/**
- * i40iw_qp_post_wr - post wr to hardware
- * @qp: hw qp ptr
- */
-void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
-{
- u64 temp;
- u32 hw_sq_tail;
- u32 sw_sq_head;
-
- mb(); /* valid bit is written and loads completed before reading shadow */
-
- /* read the doorbell shadow area */
- get_64bit_val(qp->shadow_area, 0, &temp);
-
- hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
- sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- if (sw_sq_head != hw_sq_tail) {
- if (sw_sq_head > qp->initial_ring.head) {
- if ((hw_sq_tail >= qp->initial_ring.head) &&
- (hw_sq_tail < sw_sq_head)) {
- writel(qp->qp_id, qp->wqe_alloc_reg);
- }
- } else if (sw_sq_head != qp->initial_ring.head) {
- if ((hw_sq_tail >= qp->initial_ring.head) ||
- (hw_sq_tail < sw_sq_head)) {
- writel(qp->qp_id, qp->wqe_alloc_reg);
- }
- }
- }
-
- qp->initial_ring.head = qp->sq_ring.head;
-}
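-
-/*
- * Doorbell suppression, as implemented above: the shadow area reports the
- * SQ tail hardware has consumed so far. The doorbell (writel of qp_id) is
- * rung only when that hardware tail lies between the position recorded at
- * the last ring (initial_ring.head) and the new software head; the two
- * branches cover the non-wrapped and wrapped ring cases. Otherwise the
- * engine is still working through the queue and will reach the new WQEs
- * on its own.
- */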
-
-/**
- * i40iw_qp_get_next_send_wqe - return next wqe ptr
- * @qp: hw qp ptr
- * @wqe_idx: return wqe index
- * @wqe_size: size of sq wqe
- * @total_size: work request length
- * @wr_id: work request id
- */
-u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
- u32 *wqe_idx,
- u8 wqe_size,
- u32 total_size,
-			      u64 wr_id)
-{
- u64 *wqe = NULL;
- u64 wqe_ptr;
- u32 peek_head = 0;
- u16 offset;
- enum i40iw_status_code ret_code = 0;
- u8 nop_wqe_cnt = 0, i;
- u64 *wqe_0 = NULL;
-
- *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
-
- if (!*wqe_idx)
- qp->swqe_polarity = !qp->swqe_polarity;
- wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
- offset = (u16)(wqe_ptr) & 0x7F;
- if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
- nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
- for (i = 0; i < nop_wqe_cnt; i++) {
- i40iw_nop_1(qp);
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- if (ret_code)
- return NULL;
- }
-
- *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- if (!*wqe_idx)
- qp->swqe_polarity = !qp->swqe_polarity;
- }
-
- if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
- i40iw_nop_1(qp);
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- if (ret_code)
- return NULL;
- *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- if (!*wqe_idx)
- qp->swqe_polarity = !qp->swqe_polarity;
- }
- I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
- wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
- if (ret_code)
- return NULL;
-
- wqe = qp->sq_base[*wqe_idx].elem;
-
- peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
- wqe_0 = qp->sq_base[peek_head].elem;
-
- if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
- if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
- wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
- }
-
- qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
- qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
- qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
- return wqe;
-}
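-
-/*
- * Worked example of the nop padding above, assuming I40IW_QP_WQE_MAX_SIZE
- * is 128 and I40IW_QP_WQE_MIN_SIZE is 32 (consistent with the 0x7F offset
- * mask and the shift table in i40iw_get_wqe_shift()): a 96-byte WQE whose
- * slot begins at byte offset 64 within a 128-byte block would cross the
- * block boundary (64 + 96 > 128), so (128 - 64) / 32 = 2 nop WQEs are
- * posted first and the real WQE starts at the next 128-byte boundary.
- * The extra nop for ((wqe_idx & 3) == 1) with a 64-byte WQE similarly
- * appears to keep 64-byte WQEs from starting on an odd 32-byte quantum.
- */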
-
-/**
- * i40iw_set_fragment - set fragment in wqe
- * @wqe: wqe for setting fragment
- * @offset: offset value
- * @sge: sge length and stag
- */
-static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
-{
- if (sge) {
- set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
- set_64bit_val(wqe, (offset + 8),
- (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
- LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
- }
-}
-
-/**
- * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
- * @qp: hw qp ptr
- * @wqe_idx: return wqe index
- */
-u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
-{
- u64 *wqe = NULL;
- enum i40iw_status_code ret_code;
-
- if (I40IW_RING_FULL_ERR(qp->rq_ring))
- return NULL;
-
- I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
- if (ret_code)
- return NULL;
- if (!*wqe_idx)
- qp->rwqe_polarity = !qp->rwqe_polarity;
-	/* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
- wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;
-
- return wqe;
-}
-
-/**
- * i40iw_rdma_write - rdma write operation
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- bool post_sq)
-{
- u64 header;
- u64 *wqe;
- struct i40iw_rdma_write *op_info;
- u32 i, wqe_idx;
- u32 total_size = 0, byte_off;
- enum i40iw_status_code ret_code;
- bool read_fence = false;
- u8 wqe_size;
-
- op_info = &info->op.rdma_write;
- if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
- return I40IW_ERR_INVALID_FRAG_COUNT;
-
- for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
-
- if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
- return I40IW_ERR_QP_INVALID_MSG_SIZE;
-
- read_fence |= info->read_fence;
-
- ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
- if (ret_code)
- return ret_code;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- set_64bit_val(wqe, 16,
- LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
- if (!op_info->rem_addr.stag)
- return I40IW_ERR_BAD_STAG;
-
- header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
- LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
- LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
- LS_64(read_fence, I40IWQPSQ_READFENCE) |
- LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
-
- for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
- i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
- byte_off += 16;
- }
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
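-
-/*
- * SQ WQE layout used above: the first SGE occupies qwords 0-1 (bytes
- * 0-15), the remote address sits at byte 16, the header at byte 24, and
- * each additional SGE is written at a 16-byte stride starting at byte 32.
- * For example, with num_lo_sges == 3, ADDFRAGCNT is 2, the fragments land
- * at byte offsets 0, 32 and 48, and i40iw_fragcnt_to_wqesize_sq() selects
- * a 64-byte WQE.
- */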
-
-/**
- * i40iw_rdma_read - rdma read command
- * @qp: hw qp ptr
- * @info: post sq information
- * @inv_stag: true to issue the read with local stag invalidation
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- bool inv_stag,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_rdma_read *op_info;
- u64 header;
- u32 wqe_idx;
- enum i40iw_status_code ret_code;
- u8 wqe_size;
- bool local_fence = false;
-
- op_info = &info->op.rdma_read;
- ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
- if (ret_code)
- return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- local_fence |= info->local_fence;
-
- set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
- header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
- LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
- LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
- LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_set_fragment(wqe, 0, &op_info->lo_addr);
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_send - rdma send command
- * @qp: hw qp ptr
- * @info: post sq information
- * @stag_to_inv: remote stag to invalidate (send-with-invalidate)
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- u32 stag_to_inv,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_post_send *op_info;
- u64 header;
- u32 i, wqe_idx, total_size = 0, byte_off;
- enum i40iw_status_code ret_code;
- bool read_fence = false;
- u8 wqe_size;
-
- op_info = &info->op.send;
- if (qp->max_sq_frag_cnt < op_info->num_sges)
- return I40IW_ERR_INVALID_FRAG_COUNT;
-
- for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
- ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
- if (ret_code)
- return ret_code;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- read_fence |= info->read_fence;
- set_64bit_val(wqe, 16, 0);
- header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
- LS_64(info->op_type, I40IWQPSQ_OPCODE) |
- LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
- I40IWQPSQ_ADDFRAGCNT) |
- LS_64(read_fence, I40IWQPSQ_READFENCE) |
- LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_set_fragment(wqe, 0, op_info->sg_list);
-
- for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
- i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
- byte_off += 16;
- }
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_inline_rdma_write - inline rdma write operation
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- bool post_sq)
-{
- u64 *wqe;
- u8 *dest, *src;
- struct i40iw_inline_rdma_write *op_info;
- u64 header = 0;
- u32 wqe_idx;
- enum i40iw_status_code ret_code;
- bool read_fence = false;
- u8 wqe_size;
-
- op_info = &info->op.inline_rdma_write;
- if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
-
- ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
- if (ret_code)
- return ret_code;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- read_fence |= info->read_fence;
- set_64bit_val(wqe, 16,
- LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
-
- header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
- LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
- LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
- LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
- LS_64(read_fence, I40IWQPSQ_READFENCE) |
- LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- dest = (u8 *)wqe;
- src = (u8 *)(op_info->data);
-
- if (op_info->len <= 16) {
- memcpy(dest, src, op_info->len);
- } else {
- memcpy(dest, src, 16);
- src += 16;
- dest = (u8 *)wqe + 32;
- memcpy(dest, src, op_info->len - 16);
- }
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
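-
-/*
- * Inline data layout used above: the payload is split around the control
- * qwords. The first 16 bytes are copied to the start of the WQE (bytes
- * 0-15) and any remainder starts at byte 32, skipping the remote-address
- * qword (byte 16) and the header qword (byte 24). A maximal 48-byte inline
- * write therefore fills bytes 0-15 and 32-63 of the 64-byte WQE that
- * i40iw_inline_data_size_to_wqesize() selects for any size above 16 bytes.
- */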
-
-/**
- * i40iw_inline_send - inline send operation
- * @qp: hw qp ptr
- * @info: post sq information
- * @stag_to_inv: remote stag
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- u32 stag_to_inv,
- bool post_sq)
-{
- u64 *wqe;
- u8 *dest, *src;
- struct i40iw_post_inline_send *op_info;
- u64 header;
- u32 wqe_idx;
- enum i40iw_status_code ret_code;
- bool read_fence = false;
- u8 wqe_size;
-
- op_info = &info->op.inline_send;
- if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
-
- ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
- if (ret_code)
- return ret_code;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- read_fence |= info->read_fence;
- header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
- LS_64(info->op_type, I40IWQPSQ_OPCODE) |
- LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
- LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
- LS_64(read_fence, I40IWQPSQ_READFENCE) |
- LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- dest = (u8 *)wqe;
- src = (u8 *)(op_info->data);
-
- if (op_info->len <= 16) {
- memcpy(dest, src, op_info->len);
- } else {
- memcpy(dest, src, 16);
- src += 16;
- dest = (u8 *)wqe + 32;
- memcpy(dest, src, op_info->len - 16);
- }
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_stag_local_invalidate - stag invalidate operation
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_inv_local_stag *op_info;
- u64 header;
- u32 wqe_idx;
- bool local_fence = false;
-
- op_info = &info->op.inv_local_stag;
- local_fence = info->local_fence;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8,
- LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
- set_64bit_val(wqe, 16, 0);
- header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
- LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
- LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_mw_bind - Memory Window bind operation
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
- struct i40iw_post_sq_info *info,
- bool post_sq)
-{
- u64 *wqe;
- struct i40iw_bind_window *op_info;
- u64 header;
- u32 wqe_idx;
- bool local_fence = false;
-
- op_info = &info->op.bind_window;
-
- local_fence |= info->local_fence;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
- set_64bit_val(wqe, 8,
- LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
- LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
- set_64bit_val(wqe, 16, op_info->bind_length);
- header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
- LS_64(((op_info->enable_reads << 2) |
- (op_info->enable_writes << 3)),
- I40IWQPSQ_STAGRIGHTS) |
- LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
- I40IWQPSQ_VABASEDTO) |
- LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
- LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
- LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_post_receive - post receive wqe
- * @qp: hw qp ptr
- * @info: post rq information
- */
-static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
- struct i40iw_post_rq_info *info)
-{
- u64 *wqe;
- u64 header;
- u32 total_size = 0, wqe_idx, i, byte_off;
-
- if (qp->max_rq_frag_cnt < info->num_sges)
- return I40IW_ERR_INVALID_FRAG_COUNT;
- for (i = 0; i < info->num_sges; i++)
- total_size += info->sg_list[i].len;
- wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->rq_wrid_array[wqe_idx] = info->wr_id;
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
- I40IWQPSQ_ADDFRAGCNT) |
- LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);
-
- i40iw_set_fragment(wqe, 0, info->sg_list);
-
- for (i = 1, byte_off = 32; i < info->num_sges; i++) {
- i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
- byte_off += 16;
- }
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
-
- return 0;
-}
-
-/**
- * i40iw_cq_request_notification - cq notification request (doorbell)
- * @cq: hw cq
- * @cq_notify: notification type
- */
-static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
- enum i40iw_completion_notify cq_notify)
-{
- u64 temp_val;
- u16 sw_cq_sel;
- u8 arm_next_se = 0;
- u8 arm_next = 0;
- u8 arm_seq_num;
-
- get_64bit_val(cq->shadow_area, 32, &temp_val);
- arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
- arm_seq_num++;
-
- sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
- arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
- arm_next_se |= 1;
- if (cq_notify == IW_CQ_COMPL_EVENT)
- arm_next = 1;
- temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);
-
- set_64bit_val(cq->shadow_area, 32, temp_val);
-
-	wmb(); /* make sure the shadow area is updated before the doorbell is rung */
-
- writel(cq->cq_id, cq->cqe_alloc_reg);
-}
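-
-/*
- * CQ arming, as implemented above, goes through the CQ shadow area: the
- * arm sequence number is incremented so hardware can distinguish a fresh
- * arm request from a stale one, ARM_NEXT_SE always requests an event on
- * the next solicited completion, and ARM_NEXT is set in addition when any
- * completion (IW_CQ_COMPL_EVENT) should raise an event. The writel on
- * cqe_alloc_reg then prompts hardware to re-read the shadow area.
- */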
-
-/**
- * i40iw_cq_post_entries - update tail in shadow memory
- * @cq: hw cq
- * @count: # of entries processed
- */
-static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
- u8 count)
-{
- I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
- set_64bit_val(cq->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
- return 0;
-}
-
-/**
- * i40iw_cq_poll_completion - get cq completion info
- * @cq: hw cq
- * @info: cq poll information returned
- */
-static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
- struct i40iw_cq_poll_info *info)
-{
- u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
- u64 *cqe, *sw_wqe;
- struct i40iw_qp_uk *qp;
- struct i40iw_ring *pring = NULL;
- u32 wqe_idx, q_type, array_idx = 0;
- enum i40iw_status_code ret_code = 0;
- bool move_cq_head = true;
- u8 polarity;
- u8 addl_wqes = 0;
-
- if (cq->avoid_mem_cflct)
- cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
- else
- cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);
-
- get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
-
- if (polarity != cq->polarity)
- return I40IW_ERR_QUEUE_EMPTY;
-
- q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
- info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
- if (info->error) {
- info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
- info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
- info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
- } else {
- info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
- }
-
- get_64bit_val(cqe, 0, &qword0);
- get_64bit_val(cqe, 16, &qword2);
-
- info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
-
- info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
-
- get_64bit_val(cqe, 8, &comp_ctx);
-
- info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
- info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);
-
- qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
- if (!qp) {
- ret_code = I40IW_ERR_QUEUE_DESTROYED;
- goto exit;
- }
- wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
- info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;
-
- if (q_type == I40IW_CQE_QTYPE_RQ) {
- array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
- if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
- info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
- array_idx = qp->rq_ring.tail;
- } else {
- info->wr_id = qp->rq_wrid_array[array_idx];
- }
-
- info->op_type = I40IW_OP_TYPE_REC;
- if (qword3 & I40IWCQ_STAG_MASK) {
- info->stag_invalid_set = true;
- info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
- } else {
- info->stag_invalid_set = false;
- }
- info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
- I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
- pring = &qp->rq_ring;
- } else {
- if (qp->first_sq_wq) {
- qp->first_sq_wq = false;
- if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
- I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
- I40IW_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
- memset(info, 0, sizeof(struct i40iw_cq_poll_info));
- return i40iw_cq_poll_completion(cq, info);
- }
- }
-
- if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
- info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
- info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
-
- info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
- sw_wqe = qp->sq_base[wqe_idx].elem;
- get_64bit_val(sw_wqe, 24, &wqe_qword);
-
- addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
- I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
- } else {
- do {
- u8 op_type;
- u32 tail;
-
- tail = qp->sq_ring.tail;
- sw_wqe = qp->sq_base[tail].elem;
- get_64bit_val(sw_wqe, 24, &wqe_qword);
- op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
- info->op_type = op_type;
- addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
- I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
- if (op_type != I40IWQP_OP_NOP) {
- info->wr_id = qp->sq_wrtrk_array[tail].wrid;
- info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
- break;
- }
- } while (1);
- }
- pring = &qp->sq_ring;
- }
-
- ret_code = 0;
-
-exit:
- if (!ret_code &&
- (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
- if (pring && (I40IW_RING_MORE_WORK(*pring)))
- move_cq_head = false;
-
- if (move_cq_head) {
- I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
-
- if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
- cq->polarity ^= 1;
-
- I40IW_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
- } else {
- if (info->is_srq)
- return ret_code;
- qword3 &= ~I40IW_CQ_WQEIDX_MASK;
- qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
- set_64bit_val(cqe, 24, qword3);
- }
-
- return ret_code;
-}
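-
-/*
- * Flush draining behavior of the poll routine above: on a flushed
- * completion, if the WQ ring still holds outstanding WQEs, the CQ head is
- * deliberately not advanced; instead the WQEIDX field of the same CQE is
- * rewritten with the ring tail so that the next poll re-reads this CQE and
- * reports the next flushed WQE, until the ring is drained.
- */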
-
-/**
- * i40iw_get_wqe_shift - get shift count for maximum wqe size
- * @sge: maximum number of scatter-gather elements per wqe
- * @inline_data: Maximum inline data size
- * @shift: Returns the shift needed based on sge
- *
- * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
- * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
- * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
- * Shift of 2 otherwise (wqe size of 128 bytes).
- */
-void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
-{
- *shift = 0;
- if (sge > 1 || inline_data > 16)
- *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
-}
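-
-/*
- * Example: for sge == 3 and inline_data == 32, the condition (sge > 1)
- * holds and (sge < 4 && inline_data <= 48) selects shift 1, i.e. 64-byte
- * WQEs. For sge == 4, or inline data above 48 bytes, shift is 2 and WQEs
- * are 128 bytes.
- */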
-
-/**
- * i40iw_get_sqdepth - get SQ depth (quanta)
- * @sq_size: SQ size
- * @shift: shift which determines size of WQE
- * @sqdepth: depth of SQ
- */
-enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
-{
- *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);
-
- if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
- else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
- return I40IW_ERR_INVALID_SIZE;
-
- return 0;
-}
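-
-/*
- * For illustration: with sq_size == 100 and shift == 1, the 200 requested
- * quanta plus the I40IW_SQ_RSVD slack round up to 256 (assuming the
- * reserved count, defined elsewhere, does not exceed 56). The result is
- * then clamped below by I40IW_QP_SW_MIN_WQSIZE << shift and rejected only
- * if it exceeds I40IW_QP_SW_MAX_SQ_QUANTAS.
- */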
-
-/**
- * i40iw_get_rqdepth - get RQ depth (quanta)
- * @rq_size: RQ size
- * @shift: shift which determines size of WQE
- * @rqdepth: depth of RQ
- */
-enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
-{
- *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);
-
- if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
- else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
- return I40IW_ERR_INVALID_SIZE;
-
- return 0;
-}
-
-static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
- .iw_qp_post_wr = i40iw_qp_post_wr,
- .iw_rdma_write = i40iw_rdma_write,
- .iw_rdma_read = i40iw_rdma_read,
- .iw_send = i40iw_send,
- .iw_inline_rdma_write = i40iw_inline_rdma_write,
- .iw_inline_send = i40iw_inline_send,
- .iw_stag_local_invalidate = i40iw_stag_local_invalidate,
- .iw_mw_bind = i40iw_mw_bind,
- .iw_post_receive = i40iw_post_receive,
- .iw_post_nop = i40iw_nop
-};
-
-static const struct i40iw_cq_ops iw_cq_ops = {
- .iw_cq_request_notification = i40iw_cq_request_notification,
- .iw_cq_poll_completion = i40iw_cq_poll_completion,
- .iw_cq_post_entries = i40iw_cq_post_entries,
- .iw_cq_clean = i40iw_clean_cq
-};
-
-static const struct i40iw_device_uk_ops iw_device_uk_ops = {
- .iwarp_cq_uk_init = i40iw_cq_uk_init,
- .iwarp_qp_uk_init = i40iw_qp_uk_init,
-};
-
-/**
- * i40iw_qp_uk_init - initialize shared qp
- * @qp: hw qp (user and kernel)
- * @info: qp initialization info
- *
- * Initializes the variables used in both user and kernel mode.
- * The size of a wqe depends on the maximum number of fragments
- * allowed, so wqe size * number of wqes should equal the amount
- * of memory allocated for the sq and rq. If an srq is used,
- * rq_base points to a single rq wqe only (not the whole
- * array of wqes).
- */
-enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
- struct i40iw_qp_uk_init_info *info)
-{
- enum i40iw_status_code ret_code = 0;
- u32 sq_ring_size;
- u8 sqshift, rqshift;
-
- if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
- return I40IW_ERR_INVALID_FRAG_COUNT;
-
- if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
- return I40IW_ERR_INVALID_FRAG_COUNT;
- i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
-
- qp->sq_base = info->sq;
- qp->rq_base = info->rq;
- qp->shadow_area = info->shadow_area;
- qp->sq_wrtrk_array = info->sq_wrtrk_array;
- qp->rq_wrid_array = info->rq_wrid_array;
-
- qp->wqe_alloc_reg = info->wqe_alloc_reg;
- qp->qp_id = info->qp_id;
- qp->sq_size = info->sq_size;
- qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
-
- I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
- I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- I40IW_RING_MOVE_TAIL(qp->sq_ring);
- I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
- qp->swqe_polarity = 1;
- qp->first_sq_wq = true;
- qp->swqe_polarity_deferred = 1;
- qp->rwqe_polarity = 0;
-
- if (!qp->use_srq) {
- qp->rq_size = info->rq_size;
- qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
- I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
- switch (info->abi_ver) {
- case 4:
- i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
- break;
- case 5: /* fallthrough until next ABI version */
- default:
- rqshift = I40IW_MAX_RQ_WQE_SHIFT;
- break;
- }
- qp->rq_wqe_size = rqshift;
- qp->rq_wqe_size_multiplier = 4 << rqshift;
- }
- qp->ops = iw_qp_uk_ops;
-
- return ret_code;
-}
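-
-/*
- * RQ sizing in the init routine above: with the current ABI (abi_ver 5 and
- * later), rqshift is I40IW_MAX_RQ_WQE_SHIFT (2), so rq_wqe_size_multiplier
- * is 4 << 2 = 16 qwords, i.e. every RQ WQE spans 128 bytes or four 32-byte
- * quanta. i40iw_qp_get_next_recv_wqe() converts qwords back to quanta with
- * the >> 2 when indexing rq_base.
- */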
-
-/**
- * i40iw_cq_uk_init - initialize shared cq (user and kernel)
- * @cq: hw cq
- * @info: hw cq initialization info
- */
-enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
- struct i40iw_cq_uk_init_info *info)
-{
- if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
- (info->cq_size > I40IW_MAX_CQ_SIZE))
- return I40IW_ERR_INVALID_SIZE;
- cq->cq_base = (struct i40iw_cqe *)info->cq_base;
- cq->cq_id = info->cq_id;
- cq->cq_size = info->cq_size;
- cq->cqe_alloc_reg = info->cqe_alloc_reg;
- cq->shadow_area = info->shadow_area;
- cq->avoid_mem_cflct = info->avoid_mem_cflct;
-
- I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
- cq->polarity = 1;
- cq->ops = iw_cq_ops;
-
- return 0;
-}
-
-/**
- * i40iw_device_init_uk - setup routines for iwarp shared device
- * @dev: iwarp shared (user and kernel)
- */
-void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
-{
- dev->ops_uk = iw_device_uk_ops;
-}
-
-/**
- * i40iw_clean_cq - clean cq entries
- * @queue: completion context
- * @cq: cq to clean
- */
-void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
-{
- u64 *cqe;
- u64 qword3, comp_ctx;
- u32 cq_head;
- u8 polarity, temp;
-
- cq_head = cq->cq_ring.head;
- temp = cq->polarity;
- do {
- if (cq->avoid_mem_cflct)
- cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
- else
- cqe = (u64 *)&cq->cq_base[cq_head];
- get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
-
- if (polarity != temp)
- break;
-
- get_64bit_val(cqe, 8, &comp_ctx);
- if ((void *)(unsigned long)comp_ctx == queue)
- set_64bit_val(cqe, 8, 0);
-
- cq_head = (cq_head + 1) % cq->cq_ring.size;
- if (!cq_head)
- temp ^= 1;
- } while (true);
-}
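-
-/*
- * The cleaner above walks the CQ from the current head, following the
- * polarity flip at each ring wrap, and zeroes the completion context
- * (qword 8) of every valid CQE belonging to the given queue, so that
- * later polling cannot dereference the destroyed queue through a stale
- * context pointer.
- */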
-
-/**
- * i40iw_nop - send a nop
- * @qp: hw qp ptr
- * @wr_id: work request id
- * @signaled: flag if signaled for completion
- * @post_sq: flag to post sq
- */
-enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
- u64 wr_id,
- bool signaled,
- bool post_sq)
-{
- u64 header, *wqe;
- u32 wqe_idx;
-
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
- if (!wqe)
- return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
-
- wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, header);
- if (post_sq)
- i40iw_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
- * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
- * @frag_cnt: number of fragments
- * @wqe_size: size of sq wqe returned
- */
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
-{
- switch (frag_cnt) {
- case 0:
- case 1:
- *wqe_size = I40IW_QP_WQE_MIN_SIZE;
- break;
- case 2:
- case 3:
- *wqe_size = 64;
- break;
- case 4:
- case 5:
- *wqe_size = 96;
- break;
- case 6:
- case 7:
- *wqe_size = 128;
- break;
- default:
- return I40IW_ERR_INVALID_FRAG_COUNT;
- }
-
- return 0;
-}
-
-/**
- * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
- * @frag_cnt: number of fragments
- * @wqe_size: size of rq wqe returned
- */
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
-{
- switch (frag_cnt) {
- case 0:
- case 1:
- *wqe_size = 32;
- break;
- case 2:
- case 3:
- *wqe_size = 64;
- break;
- case 4:
- case 5:
- case 6:
- case 7:
- *wqe_size = 128;
- break;
- default:
- return I40IW_ERR_INVALID_FRAG_COUNT;
- }
-
- return 0;
-}
-
-/**
- * i40iw_inline_data_size_to_wqesize - calculate wqe size based on inline data size
- * @data_size: data size for inline
- * @wqe_size: size of sq wqe returned
- */
-enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
- u8 *wqe_size)
-{
- if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
-
- if (data_size <= 16)
- *wqe_size = I40IW_QP_WQE_MIN_SIZE;
- else
- *wqe_size = 64;
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
deleted file mode 100644
index 93fc3081dd65..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ /dev/null
@@ -1,422 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_USER_H
-#define I40IW_USER_H
-
-enum i40iw_device_capabilities_const {
- I40IW_WQE_SIZE = 4,
- I40IW_CQP_WQE_SIZE = 8,
- I40IW_CQE_SIZE = 4,
- I40IW_EXTENDED_CQE_SIZE = 8,
- I40IW_AEQE_SIZE = 2,
- I40IW_CEQE_SIZE = 1,
- I40IW_CQP_CTX_SIZE = 8,
- I40IW_SHADOW_AREA_SIZE = 8,
- I40IW_CEQ_MAX_COUNT = 256,
- I40IW_QUERY_FPM_BUF_SIZE = 128,
- I40IW_COMMIT_FPM_BUF_SIZE = 128,
- I40IW_MIN_IW_QP_ID = 1,
- I40IW_MAX_IW_QP_ID = 262143,
- I40IW_MIN_CEQID = 0,
- I40IW_MAX_CEQID = 256,
- I40IW_MIN_CQID = 0,
- I40IW_MAX_CQID = 131071,
- I40IW_MIN_AEQ_ENTRIES = 1,
- I40IW_MAX_AEQ_ENTRIES = 524287,
- I40IW_MIN_CEQ_ENTRIES = 1,
- I40IW_MAX_CEQ_ENTRIES = 131071,
- I40IW_MIN_CQ_SIZE = 1,
- I40IW_MAX_CQ_SIZE = 1048575,
- I40IW_DB_ID_ZERO = 0,
- I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
- I40IW_MAX_SGE_RD = 1,
- I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
- I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
- I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
- I40IW_MAX_VF_FPM_ID = 47,
- I40IW_MAX_VF_PER_PF = 127,
- I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
- I40IW_MAX_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 64,
- I40IW_MAX_ORD_SIZE = 127,
- I40IW_MAX_WQ_ENTRIES = 2048,
- I40IW_Q2_BUFFER_SIZE = (248 + 100),
- I40IW_MAX_WQE_SIZE_RQ = 128,
- I40IW_QP_CTX_SIZE = 248,
- I40IW_MAX_PDS = 32768
-};
-
-#define i40iw_handle void *
-#define i40iw_adapter_handle i40iw_handle
-#define i40iw_qp_handle i40iw_handle
-#define i40iw_cq_handle i40iw_handle
-#define i40iw_srq_handle i40iw_handle
-#define i40iw_pd_id i40iw_handle
-#define i40iw_stag_handle i40iw_handle
-#define i40iw_stag_index u32
-#define i40iw_stag u32
-#define i40iw_stag_key u8
-
-#define i40iw_tagged_offset u64
-#define i40iw_access_privileges u32
-#define i40iw_physical_fragment u64
-#define i40iw_address_list u64 *
-
-#define I40IW_MAX_MR_SIZE 0x10000000000L
-#define I40IW_MAX_RQ_WQE_SHIFT 2
-
-struct i40iw_qp_uk;
-struct i40iw_cq_uk;
-struct i40iw_srq_uk;
-struct i40iw_qp_uk_init_info;
-struct i40iw_cq_uk_init_info;
-struct i40iw_srq_uk_init_info;
-
-struct i40iw_sge {
- i40iw_tagged_offset tag_off;
- u32 len;
- i40iw_stag stag;
-};
-
-#define i40iw_sgl struct i40iw_sge *
-
-struct i40iw_ring {
- u32 head;
- u32 tail;
- u32 size;
-};
-
-struct i40iw_cqe {
- u64 buf[I40IW_CQE_SIZE];
-};
-
-struct i40iw_extended_cqe {
- u64 buf[I40IW_EXTENDED_CQE_SIZE];
-};
-
-struct i40iw_wqe {
- u64 buf[I40IW_WQE_SIZE];
-};
-
-struct i40iw_qp_uk_ops;
-
-enum i40iw_addressing_type {
- I40IW_ADDR_TYPE_ZERO_BASED = 0,
- I40IW_ADDR_TYPE_VA_BASED = 1,
-};
-
-#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01
-#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02
-#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
-#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05
-#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
-#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a
-#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10
-#define I40IW_ACCESS_FLAGS_ALL 0x1F
-
-#define I40IW_OP_TYPE_RDMA_WRITE 0
-#define I40IW_OP_TYPE_RDMA_READ 1
-#define I40IW_OP_TYPE_SEND 3
-#define I40IW_OP_TYPE_SEND_INV 4
-#define I40IW_OP_TYPE_SEND_SOL 5
-#define I40IW_OP_TYPE_SEND_SOL_INV 6
-#define I40IW_OP_TYPE_REC 7
-#define I40IW_OP_TYPE_BIND_MW 8
-#define I40IW_OP_TYPE_FAST_REG_NSMR 9
-#define I40IW_OP_TYPE_INV_STAG 10
-#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11
-#define I40IW_OP_TYPE_NOP 12
-
-enum i40iw_completion_status {
- I40IW_COMPL_STATUS_SUCCESS = 0,
- I40IW_COMPL_STATUS_FLUSHED,
- I40IW_COMPL_STATUS_INVALID_WQE,
- I40IW_COMPL_STATUS_QP_CATASTROPHIC,
- I40IW_COMPL_STATUS_REMOTE_TERMINATION,
- I40IW_COMPL_STATUS_INVALID_STAG,
- I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
- I40IW_COMPL_STATUS_ACCESS_VIOLATION,
- I40IW_COMPL_STATUS_INVALID_PD_ID,
- I40IW_COMPL_STATUS_WRAP_ERROR,
- I40IW_COMPL_STATUS_STAG_INVALID_PDID,
- I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
- I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
- I40IW_COMPL_STATUS_STAG_NOT_INVALID,
- I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
- I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
- I40IW_COMPL_STATUS_INVALID_FBO,
- I40IW_COMPL_STATUS_INVALID_LENGTH,
- I40IW_COMPL_STATUS_INVALID_ACCESS,
- I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
- I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
- I40IW_COMPL_STATUS_INVALID_REGION,
- I40IW_COMPL_STATUS_INVALID_WINDOW,
- I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
-};
-
-enum i40iw_completion_notify {
- IW_CQ_COMPL_EVENT = 0,
- IW_CQ_COMPL_SOLICITED = 1
-};
-
-struct i40iw_post_send {
- i40iw_sgl sg_list;
- u32 num_sges;
-};
-
-struct i40iw_post_inline_send {
- void *data;
- u32 len;
-};
-
-struct i40iw_rdma_write {
- i40iw_sgl lo_sg_list;
- u32 num_lo_sges;
- struct i40iw_sge rem_addr;
-};
-
-struct i40iw_inline_rdma_write {
- void *data;
- u32 len;
- struct i40iw_sge rem_addr;
-};
-
-struct i40iw_rdma_read {
- struct i40iw_sge lo_addr;
- struct i40iw_sge rem_addr;
-};
-
-struct i40iw_bind_window {
- i40iw_stag mr_stag;
- u64 bind_length;
- void *va;
- enum i40iw_addressing_type addressing_type;
- bool enable_reads;
- bool enable_writes;
- i40iw_stag mw_stag;
-};
-
-struct i40iw_inv_local_stag {
- i40iw_stag target_stag;
-};
-
-struct i40iw_post_sq_info {
- u64 wr_id;
- u8 op_type;
- bool signaled;
- bool read_fence;
- bool local_fence;
- bool inline_data;
- bool defer_flag;
- union {
- struct i40iw_post_send send;
- struct i40iw_rdma_write rdma_write;
- struct i40iw_rdma_read rdma_read;
- struct i40iw_rdma_read rdma_read_inv;
- struct i40iw_bind_window bind_window;
- struct i40iw_inv_local_stag inv_local_stag;
- struct i40iw_inline_rdma_write inline_rdma_write;
- struct i40iw_post_inline_send inline_send;
- } op;
-};
-
-struct i40iw_post_rq_info {
- u64 wr_id;
- i40iw_sgl sg_list;
- u32 num_sges;
-};
-
-struct i40iw_cq_poll_info {
- u64 wr_id;
- i40iw_qp_handle qp_handle;
- u32 bytes_xfered;
- u32 tcp_seq_num;
- u32 qp_id;
- i40iw_stag inv_stag;
- enum i40iw_completion_status comp_status;
- u16 major_err;
- u16 minor_err;
- u8 op_type;
- bool stag_invalid_set;
- bool error;
- bool is_srq;
- bool solicited_event;
-};
-
-struct i40iw_qp_uk_ops {
- void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
- enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, bool);
- enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, bool, bool);
- enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, u32, bool);
- enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, bool);
- enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, u32, bool);
- enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, bool);
- enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
- struct i40iw_post_sq_info *, bool);
- enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
- struct i40iw_post_rq_info *);
- enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
-};
-
-struct i40iw_cq_ops {
- void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
- enum i40iw_completion_notify);
- enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
- struct i40iw_cq_poll_info *);
- enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
- void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
-};
-
-struct i40iw_dev_uk;
-
-struct i40iw_device_uk_ops {
- enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
- struct i40iw_cq_uk_init_info *);
- enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
- struct i40iw_qp_uk_init_info *);
-};
-
-struct i40iw_dev_uk {
- struct i40iw_device_uk_ops ops_uk;
-};
-
-struct i40iw_sq_uk_wr_trk_info {
- u64 wrid;
- u32 wr_len;
- u8 wqe_size;
- u8 reserved[3];
-};
-
-struct i40iw_qp_quanta {
- u64 elem[I40IW_WQE_SIZE];
-};
-
-struct i40iw_qp_uk {
- struct i40iw_qp_quanta *sq_base;
- struct i40iw_qp_quanta *rq_base;
- u32 __iomem *wqe_alloc_reg;
- struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
- u64 *rq_wrid_array;
- u64 *shadow_area;
- struct i40iw_ring sq_ring;
- struct i40iw_ring rq_ring;
- struct i40iw_ring initial_ring;
- u32 qp_id;
- u32 sq_size;
- u32 rq_size;
- u32 max_sq_frag_cnt;
- u32 max_rq_frag_cnt;
- struct i40iw_qp_uk_ops ops;
- bool use_srq;
- u8 swqe_polarity;
- u8 swqe_polarity_deferred;
- u8 rwqe_polarity;
- u8 rq_wqe_size;
- u8 rq_wqe_size_multiplier;
- bool first_sq_wq;
- bool deferred_flag;
-};
-
-struct i40iw_cq_uk {
- struct i40iw_cqe *cq_base;
- u32 __iomem *cqe_alloc_reg;
- u64 *shadow_area;
- u32 cq_id;
- u32 cq_size;
- struct i40iw_ring cq_ring;
- u8 polarity;
- bool avoid_mem_cflct;
-
- struct i40iw_cq_ops ops;
-};
-
-struct i40iw_qp_uk_init_info {
- struct i40iw_qp_quanta *sq;
- struct i40iw_qp_quanta *rq;
- u32 __iomem *wqe_alloc_reg;
- u64 *shadow_area;
- struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
- u64 *rq_wrid_array;
- u32 qp_id;
- u32 sq_size;
- u32 rq_size;
- u32 max_sq_frag_cnt;
- u32 max_rq_frag_cnt;
- u32 max_inline_data;
- int abi_ver;
-};
-
-struct i40iw_cq_uk_init_info {
- u32 __iomem *cqe_alloc_reg;
- struct i40iw_cqe *cq_base;
- u64 *shadow_area;
- u32 cq_size;
- u32 cq_id;
- bool avoid_mem_cflct;
-};
-
-void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
-
-void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
-u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
- u8 wqe_size,
- u32 total_size,
-				u64 wr_id);
-u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
-u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
-
-enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
- struct i40iw_cq_uk_init_info *info);
-enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
- struct i40iw_qp_uk_init_info *info);
-
-void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
-enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq);
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
-enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
- u8 *wqe_size);
-void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
-enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
-enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
deleted file mode 100644
index 9ff825f7860b..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ /dev/null
@@ -1,1518 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <net/netevent.h>
-#include <net/neighbour.h>
-#include "i40iw.h"
-
-/**
- * i40iw_arp_table - manage arp table
- * @iwdev: iwarp device
- * @ip_addr: ip address for device
- * @ipv4: flag indicating IPv4 when true
- * @mac_addr: mac address ptr
- * @action: modify, delete or add
- */
-int i40iw_arp_table(struct i40iw_device *iwdev,
- u32 *ip_addr,
- bool ipv4,
- u8 *mac_addr,
- u32 action)
-{
- int arp_index;
- int err;
- u32 ip[4];
-
- if (ipv4) {
- memset(ip, 0, sizeof(ip));
- ip[0] = *ip_addr;
- } else {
- memcpy(ip, ip_addr, sizeof(ip));
- }
-
- for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
- if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
- break;
- switch (action) {
- case I40IW_ARP_ADD:
- if (arp_index != iwdev->arp_table_size)
- return -1;
-
- arp_index = 0;
- err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
- iwdev->arp_table_size,
- (u32 *)&arp_index,
- &iwdev->next_arp_index);
-
- if (err)
- return err;
-
- memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
- ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
- break;
- case I40IW_ARP_RESOLVE:
- if (arp_index == iwdev->arp_table_size)
- return -1;
- break;
- case I40IW_ARP_DELETE:
- if (arp_index == iwdev->arp_table_size)
- return -1;
- memset(iwdev->arp_table[arp_index].ip_addr, 0,
- sizeof(iwdev->arp_table[arp_index].ip_addr));
- eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
- i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
- break;
- default:
- return -1;
- }
- return arp_index;
-}
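
Taken together, the three actions make i40iw_arp_table() a small lookup-table API: I40IW_ARP_ADD registers an IP-to-MAC mapping, I40IW_ARP_RESOLVE returns the table index for an IP, and I40IW_ARP_DELETE releases the entry. A minimal caller sketch (hypothetical; the driver's real callers go through i40iw_manage_arp_cache()):

/* Hypothetical: resolve a host-order IPv4 address to its ARP table index. */
static int example_resolve_ipv4(struct i40iw_device *iwdev, u32 ipaddr)
{
	int arp_index;

	/* mac_addr may be NULL: I40IW_ARP_RESOLVE never dereferences it */
	arp_index = i40iw_arp_table(iwdev, &ipaddr, true, NULL,
				    I40IW_ARP_RESOLVE);
	return arp_index < 0 ? -ENOENT : arp_index;
}
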
-
-/**
- * i40iw_wr32 - write 32 bits to hw register
- * @hw: hardware information including registers
- * @reg: register offset
- * @value: value to write to register
- */
-inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
-{
- writel(value, hw->hw_addr + reg);
-}
-
-/**
- * i40iw_rd32 - read a 32 bit hw register
- * @hw: hardware information including registers
- * @reg: register offset
- *
- * Return value of register content
- */
-inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
-{
- return readl(hw->hw_addr + reg);
-}
-
-/**
- * i40iw_inetaddr_event - system notifier for ipv4 addr events
- * @notifier: not used
- * @event: event for notifier
- * @ptr: interface address
- */
-int i40iw_inetaddr_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr)
-{
- struct in_ifaddr *ifa = ptr;
- struct net_device *event_netdev = ifa->ifa_dev->dev;
- struct net_device *netdev;
- struct net_device *upper_dev;
- struct i40iw_device *iwdev;
- struct i40iw_handler *hdl;
- u32 local_ipaddr;
- u32 action = I40IW_ARP_ADD;
-
- hdl = i40iw_find_netdev(event_netdev);
- if (!hdl)
- return NOTIFY_DONE;
-
- iwdev = &hdl->device;
- if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
- return NOTIFY_DONE;
-
- netdev = iwdev->ldev->netdev;
- upper_dev = netdev_master_upper_dev_get(netdev);
- if (netdev != event_netdev)
- return NOTIFY_DONE;
-
- if (upper_dev) {
- struct in_device *in;
-
- rcu_read_lock();
- in = __in_dev_get_rcu(upper_dev);
-
- local_ipaddr = 0;
- if (in) {
- struct in_ifaddr *ifa;
-
- ifa = rcu_dereference(in->ifa_list);
- if (ifa)
- local_ipaddr = ntohl(ifa->ifa_address);
- }
-
- rcu_read_unlock();
- } else {
- local_ipaddr = ntohl(ifa->ifa_address);
- }
- switch (event) {
- case NETDEV_DOWN:
- action = I40IW_ARP_DELETE;
- fallthrough;
- case NETDEV_UP:
- case NETDEV_CHANGEADDR:
-
- /* Just skip if no need to handle ARP cache */
- if (!local_ipaddr)
- break;
-
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- &local_ipaddr,
- true,
- action);
- i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
-			action == I40IW_ARP_ADD);
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
-/**
- * i40iw_inet6addr_event - system notifier for ipv6 addr events
- * @notifier: not used
- * @event: event for notifier
- * @ptr: interface address
- */
-int i40iw_inet6addr_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr)
-{
- struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *event_netdev = ifa->idev->dev;
- struct net_device *netdev;
- struct i40iw_device *iwdev;
- struct i40iw_handler *hdl;
- u32 local_ipaddr6[4];
- u32 action = I40IW_ARP_ADD;
-
- hdl = i40iw_find_netdev(event_netdev);
- if (!hdl)
- return NOTIFY_DONE;
-
- iwdev = &hdl->device;
- if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
- return NOTIFY_DONE;
-
- netdev = iwdev->ldev->netdev;
- if (netdev != event_netdev)
- return NOTIFY_DONE;
-
- i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
- switch (event) {
- case NETDEV_DOWN:
- action = I40IW_ARP_DELETE;
- fallthrough;
- case NETDEV_UP:
- case NETDEV_CHANGEADDR:
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- local_ipaddr6,
- false,
- action);
- i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
-			action == I40IW_ARP_ADD);
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
-/**
- * i40iw_net_event - system notifier for netevents
- * @notifier: not used
- * @event: event for notifier
- * @ptr: neighbor
- */
-int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
-{
- struct neighbour *neigh = ptr;
- struct i40iw_device *iwdev;
- struct i40iw_handler *iwhdl;
- __be32 *p;
- u32 local_ipaddr[4];
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
- if (!iwhdl)
- return NOTIFY_DONE;
- iwdev = &iwhdl->device;
- if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
- return NOTIFY_DONE;
- p = (__be32 *)neigh->primary_key;
- i40iw_copy_ip_ntohl(local_ipaddr, p);
- if (neigh->nud_state & NUD_VALID) {
- i40iw_manage_arp_cache(iwdev,
- neigh->ha,
- local_ipaddr,
- false,
- I40IW_ARP_ADD);
-
- } else {
- i40iw_manage_arp_cache(iwdev,
- neigh->ha,
- local_ipaddr,
- false,
- I40IW_ARP_DELETE);
- }
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
-/**
- * i40iw_netdevice_event - system notifier for netdev events
- * @notifier: not used
- * @event: event for notifier
- * @ptr: netdev
- */
-int i40iw_netdevice_event(struct notifier_block *notifier,
- unsigned long event,
- void *ptr)
-{
- struct net_device *event_netdev;
- struct net_device *netdev;
- struct i40iw_device *iwdev;
- struct i40iw_handler *hdl;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
-
- hdl = i40iw_find_netdev(event_netdev);
- if (!hdl)
- return NOTIFY_DONE;
-
- iwdev = &hdl->device;
- if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
- return NOTIFY_DONE;
-
- netdev = iwdev->ldev->netdev;
- if (netdev != event_netdev)
- return NOTIFY_DONE;
-
- iwdev->iw_status = 1;
-
- switch (event) {
- case NETDEV_DOWN:
- iwdev->iw_status = 0;
- fallthrough;
- case NETDEV_UP:
- i40iw_port_ibevent(iwdev);
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
-/**
- * i40iw_get_cqp_request - get cqp struct
- * @cqp: device cqp ptr
- * @wait: cqp to be used in wait mode
- */
-struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
-{
- struct i40iw_cqp_request *cqp_request = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&cqp->req_lock, flags);
- if (!list_empty(&cqp->cqp_avail_reqs)) {
- cqp_request = list_entry(cqp->cqp_avail_reqs.next,
- struct i40iw_cqp_request, list);
- list_del_init(&cqp_request->list);
- }
- spin_unlock_irqrestore(&cqp->req_lock, flags);
- if (!cqp_request) {
- cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
- if (cqp_request) {
- cqp_request->dynamic = true;
- INIT_LIST_HEAD(&cqp_request->list);
- init_waitqueue_head(&cqp_request->waitq);
- }
- }
- if (!cqp_request) {
-		i40iw_pr_err("CQP Request Fail: No Memory\n");
- return NULL;
- }
-
- if (wait) {
- atomic_set(&cqp_request->refcount, 2);
- cqp_request->waiting = true;
- } else {
- atomic_set(&cqp_request->refcount, 1);
- }
- return cqp_request;
-}
-
-/**
- * i40iw_free_cqp_request - free cqp request
- * @cqp: cqp ptr
- * @cqp_request: to be put back in cqp list
- */
-void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
-{
- struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
- unsigned long flags;
-
- if (cqp_request->dynamic) {
- kfree(cqp_request);
- } else {
- cqp_request->request_done = false;
- cqp_request->callback_fcn = NULL;
- cqp_request->waiting = false;
-
- spin_lock_irqsave(&cqp->req_lock, flags);
- list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
- spin_unlock_irqrestore(&cqp->req_lock, flags);
- }
- wake_up(&iwdev->close_wq);
-}
-
-/**
- * i40iw_put_cqp_request - dec ref count and free if 0
- * @cqp: cqp ptr
- * @cqp_request: to be put back in cqp list
- */
-void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
- struct i40iw_cqp_request *cqp_request)
-{
- if (atomic_dec_and_test(&cqp_request->refcount))
- i40iw_free_cqp_request(cqp, cqp_request);
-}
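
Together, get/put give every CQP command the same shape: take a request (waiting or fire-and-forget), fill in its cqp_commands_info, and post it through i40iw_handle_cqp_op(), which drops the waiter's reference on completion. A hedged sketch of the pattern that the command helpers later in this file all follow:

/* Hypothetical skeleton of a waiting CQP command (details elided). */
static enum i40iw_status_code example_cqp_cmd(struct i40iw_device *iwdev,
					      struct i40iw_sc_cq *cq)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	/* wait == true: refcount starts at 2, one ref for the waiter */
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;

	/* posts the WQE and, for waiting requests, blocks until done */
	return i40iw_handle_cqp_op(iwdev, cqp_request);
}
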
-
-/**
- * i40iw_free_pending_cqp_request - free pending cqp request objs
- * @cqp: cqp ptr
- * @cqp_request: to be put back in cqp list
- */
-static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
- struct i40iw_cqp_request *cqp_request)
-{
- struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
-
- if (cqp_request->waiting) {
- cqp_request->compl_info.error = true;
- cqp_request->request_done = true;
- wake_up(&cqp_request->waitq);
- }
- i40iw_put_cqp_request(cqp, cqp_request);
- wait_event_timeout(iwdev->close_wq,
- !atomic_read(&cqp_request->refcount),
- 1000);
-}
-
-/**
- * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
- * @iwdev: iwarp device
- */
-void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_cqp *cqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request = NULL;
- struct cqp_commands_info *pcmdinfo = NULL;
- u32 i, pending_work, wqe_idx;
-
- pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
- wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
- for (i = 0; i < pending_work; i++) {
- cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
- if (cqp_request)
- i40iw_free_pending_cqp_request(cqp, cqp_request);
- wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
- }
-
- while (!list_empty(&dev->cqp_cmd_head)) {
- pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
- cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
- if (cqp_request)
- i40iw_free_pending_cqp_request(cqp, cqp_request);
- }
-}
-
-/**
- * i40iw_wait_event - wait for completion
- * @iwdev: iwarp device
- * @cqp_request: cqp request to wait
- */
-static int i40iw_wait_event(struct i40iw_device *iwdev,
- struct i40iw_cqp_request *cqp_request)
-{
- struct cqp_commands_info *info = &cqp_request->info;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_timeout cqp_timeout;
- bool cqp_error = false;
- int err_code = 0;
-
-	memset(&cqp_timeout, 0, sizeof(cqp_timeout));
- cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
- do {
- if (wait_event_timeout(cqp_request->waitq,
- cqp_request->request_done, CQP_COMPL_WAIT_TIME))
- break;
-
- i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);
-
- if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
- continue;
-
- i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
- err_code = -ETIME;
- if (!iwdev->reset) {
- iwdev->reset = true;
- i40iw_request_reset(iwdev);
- }
- goto done;
- } while (1);
- cqp_error = cqp_request->compl_info.error;
- if (cqp_error) {
- i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
- info->cqp_cmd, cqp_request->compl_info.maj_err_code,
- cqp_request->compl_info.min_err_code);
- err_code = -EPROTO;
- goto done;
- }
-done:
- i40iw_put_cqp_request(iwcqp, cqp_request);
- return err_code;
-}
-
-/**
- * i40iw_handle_cqp_op - process cqp command
- * @iwdev: iwarp device
- * @cqp_request: cqp request to process
- */
-enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
-					   struct i40iw_cqp_request *cqp_request)
-{
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- enum i40iw_status_code status;
- struct cqp_commands_info *info = &cqp_request->info;
- int err_code = 0;
-
- if (iwdev->reset) {
- i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
- return I40IW_ERR_CQP_COMPL_ERROR;
- }
-
- status = i40iw_process_cqp_cmd(dev, info);
- if (status) {
- i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
- i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
- return status;
- }
- if (cqp_request->waiting)
- err_code = i40iw_wait_event(iwdev, cqp_request);
- if (err_code)
- status = I40IW_ERR_CQP_COMPL_ERROR;
- return status;
-}
-
-/**
- * i40iw_add_devusecount - add dev refcount
- * @iwdev: dev for refcount
- */
-void i40iw_add_devusecount(struct i40iw_device *iwdev)
-{
- atomic64_inc(&iwdev->use_count);
-}
-
-/**
- * i40iw_rem_devusecount - decrement refcount for dev
- * @iwdev: device
- */
-void i40iw_rem_devusecount(struct i40iw_device *iwdev)
-{
- if (!atomic64_dec_and_test(&iwdev->use_count))
- return;
- wake_up(&iwdev->close_wq);
-}
-
-/**
- * i40iw_add_pdusecount - add pd refcount
- * @iwpd: pd for refcount
- */
-void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
-{
- atomic_inc(&iwpd->usecount);
-}
-
-/**
- * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
- * @iwpd: pd for refcount
- * @iwdev: iwarp device
- */
-void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
-{
- if (!atomic_dec_and_test(&iwpd->usecount))
- return;
- i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
-}
-
-/**
- * i40iw_qp_add_ref - add refcount for qp
- * @ibqp: iwarp qp
- */
-void i40iw_qp_add_ref(struct ib_qp *ibqp)
-{
- struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
-
- refcount_inc(&iwqp->refcount);
-}
-
-/**
- * i40iw_qp_rem_ref - rem refcount for qp and free if 0
- * @ibqp: iwarp qp
- */
-void i40iw_qp_rem_ref(struct ib_qp *ibqp)
-{
- struct i40iw_qp *iwqp;
- struct i40iw_device *iwdev;
- u32 qp_num;
- unsigned long flags;
-
- iwqp = to_iwqp(ibqp);
- iwdev = iwqp->iwdev;
- spin_lock_irqsave(&iwdev->qptable_lock, flags);
- if (!refcount_dec_and_test(&iwqp->refcount)) {
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- return;
- }
-
- qp_num = iwqp->ibqp.qp_num;
- iwdev->qp_table[qp_num] = NULL;
- spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- complete(&iwqp->free_qp);
-}
-
-/**
- * i40iw_get_qp - get qp address
- * @device: iwarp device
- * @qpn: qp number
- */
-struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
-{
- struct i40iw_device *iwdev = to_iwdev(device);
-
- if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
- return NULL;
-
- return &iwdev->qp_table[qpn]->ibqp;
-}
-
-/**
- * i40iw_debug_buf - print debug msg and buffer if mask set
- * @dev: hardware control device structure
- * @mask: mask to compare to decide whether to print
- * @desc: identifying string
- * @buf: points to buffer addr
- * @size: size of buffer to print
- */
-void i40iw_debug_buf(struct i40iw_sc_dev *dev,
- enum i40iw_debug_flag mask,
- char *desc,
- u64 *buf,
- u32 size)
-{
- u32 i;
-
- if (!(dev->debug_mask & mask))
- return;
- i40iw_debug(dev, mask, "%s\n", desc);
- i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
- (unsigned long long)virt_to_phys(buf));
-
- for (i = 0; i < size; i += 8)
- i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
-}
-
-/**
- * i40iw_get_hw_addr - return hw addr
- * @par: points to shared dev
- */
-u8 __iomem *i40iw_get_hw_addr(void *par)
-{
- struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;
-
- return dev->hw->hw_addr;
-}
-
-/**
- * i40iw_remove_head - return head entry and remove from list
- * @list: list for entry
- */
-void *i40iw_remove_head(struct list_head *list)
-{
- struct list_head *entry;
-
- if (list_empty(list))
- return NULL;
-
- entry = (void *)list->next;
- list_del(entry);
- return (void *)entry;
-}
-
-/**
- * i40iw_allocate_dma_mem - Memory alloc helper fn
- * @hw: pointer to the HW structure
- * @mem: ptr to mem struct to fill out
- * @size: size of memory requested
- * @alignment: what to align the allocation to
- */
-enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
- struct i40iw_dma_mem *mem,
- u64 size,
- u32 alignment)
-{
- struct pci_dev *pcidev = hw->pcidev;
-
- if (!mem)
- return I40IW_ERR_PARAM;
- mem->size = ALIGN(size, alignment);
- mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
- (dma_addr_t *)&mem->pa, GFP_KERNEL);
- if (!mem->va)
- return I40IW_ERR_NO_MEMORY;
- return 0;
-}
-
-/**
- * i40iw_free_dma_mem - Memory free helper fn
- * @hw: pointer to the HW structure
- * @mem: ptr to mem struct to free
- */
-void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
-{
- struct pci_dev *pcidev = hw->pcidev;
-
- if (!mem || !mem->va)
- return;
-
- dma_free_coherent(&pcidev->dev, mem->size,
- mem->va, (dma_addr_t)mem->pa);
- mem->va = NULL;
-}
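
The two DMA helpers are always paired: every successful i40iw_allocate_dma_mem() is balanced by i40iw_free_dma_mem() on the same i40iw_dma_mem. A minimal round-trip sketch (hypothetical caller):

/* Hypothetical: allocate, use, and release a 256-byte-aligned buffer. */
static enum i40iw_status_code example_dma_roundtrip(struct i40iw_hw *hw)
{
	struct i40iw_dma_mem mem = {};
	enum i40iw_status_code status;

	status = i40iw_allocate_dma_mem(hw, &mem, 4096, 256);
	if (status)
		return status;

	/* ... hand mem.pa to hardware, touch mem.va from the CPU ... */

	i40iw_free_dma_mem(hw, &mem);	/* also NULLs mem.va */
	return 0;
}
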
-
-/**
- * i40iw_allocate_virt_mem - virtual memory alloc helper fn
- * @hw: pointer to the HW structure
- * @mem: ptr to mem struct to fill out
- * @size: size of memory requested
- */
-enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
- struct i40iw_virt_mem *mem,
- u32 size)
-{
- if (!mem)
- return I40IW_ERR_PARAM;
-
- mem->size = size;
- mem->va = kzalloc(size, GFP_KERNEL);
-
-	if (!mem->va)
-		return I40IW_ERR_NO_MEMORY;
-	return 0;
-}
-
-/**
- * i40iw_free_virt_mem - virtual memory free helper fn
- * @hw: pointer to the HW structure
- * @mem: ptr to mem struct to free
- */
-enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
- struct i40iw_virt_mem *mem)
-{
- if (!mem)
- return I40IW_ERR_PARAM;
- /*
- * mem->va points to the parent of mem, so both mem and mem->va
- * can not be touched once mem->va is freed
- */
- kfree(mem->va);
- return 0;
-}
-
-/**
- * i40iw_cqp_sds_cmd - create cqp command for sd
- * @dev: hardware control device structure
- * @sdinfo: information for sd cqp
- */
-enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_update_sds_info *sdinfo)
-{
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
- cqp_info = &cqp_request->info;
- memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
- sizeof(cqp_info->in.u.update_pe_sds.info));
- cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
- cqp_info->post_sq = 1;
- cqp_info->in.u.update_pe_sds.dev = dev;
- cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
-		i40iw_pr_err("CQP-OP Update SDs fail");
- return status;
-}
-
-/**
- * i40iw_qp_suspend_resume - cqp command for suspend/resume
- * @dev: hardware control device structure
- * @qp: hardware control qp
- * @suspend: flag if suspend or resume
- */
-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_cqp_request *cqp_request;
- struct i40iw_sc_cqp *cqp = dev->cqp;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
- cqp_info->in.u.suspend_resume.cqp = cqp;
- cqp_info->in.u.suspend_resume.qp = qp;
- cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
-}
-
-/**
- * i40iw_term_modify_qp - modify qp for term message
- * @qp: hardware control qp
- * @next_state: qp's next state
- * @term: terminate code
- * @term_len: length
- */
-void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
-{
- struct i40iw_qp *iwqp;
-
- iwqp = (struct i40iw_qp *)qp->back_qp;
- i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
-}
-
-/**
- * i40iw_terminate_done - after terminate is completed
- * @qp: hardware control qp
- * @timeout_occurred: indicates if terminate timer expired
- */
-void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
-{
- struct i40iw_qp *iwqp;
- u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
- u8 hte = 0;
- bool first_time;
- unsigned long flags;
-
- iwqp = (struct i40iw_qp *)qp->back_qp;
- spin_lock_irqsave(&iwqp->lock, flags);
- if (iwqp->hte_added) {
- iwqp->hte_added = 0;
- hte = 1;
- }
- first_time = !(qp->term_flags & I40IW_TERM_DONE);
- qp->term_flags |= I40IW_TERM_DONE;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- if (first_time) {
- if (!timeout_occurred)
- i40iw_terminate_del_timer(qp);
- else
- next_iwarp_state = I40IW_QP_STATE_CLOSING;
-
- i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
- i40iw_cm_disconn(iwqp);
- }
-}
-
-/**
- * i40iw_terminate_timeout - timeout happened
- * @t: timer context containing pointer to the iwarp qp
- */
-static void i40iw_terminate_timeout(struct timer_list *t)
-{
- struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
-
- i40iw_terminate_done(qp, 1);
- i40iw_qp_rem_ref(&iwqp->ibqp);
-}
-
-/**
- * i40iw_terminate_start_timer - start terminate timeout
- * @qp: hardware control qp
- */
-void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
-{
- struct i40iw_qp *iwqp;
-
- iwqp = (struct i40iw_qp *)qp->back_qp;
- i40iw_qp_add_ref(&iwqp->ibqp);
- timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
- iwqp->terminate_timer.expires = jiffies + HZ;
- add_timer(&iwqp->terminate_timer);
-}
-
-/**
- * i40iw_terminate_del_timer - delete terminate timeout
- * @qp: hardware control qp
- */
-void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
-{
- struct i40iw_qp *iwqp;
-
- iwqp = (struct i40iw_qp *)qp->back_qp;
- if (del_timer(&iwqp->terminate_timer))
- i40iw_qp_rem_ref(&iwqp->ibqp);
-}
-
-/**
- * i40iw_cqp_generic_worker - generic worker for cqp
- * @work: work pointer
- */
-static void i40iw_cqp_generic_worker(struct work_struct *work)
-{
- struct i40iw_virtchnl_work_info *work_info =
- &((struct virtchnl_work *)work)->work_info;
-
- if (work_info->worker_vf_dev)
- work_info->callback_fcn(work_info->worker_vf_dev);
-}
-
-/**
- * i40iw_cqp_spawn_worker - spawn worker thread
- * @dev: device struct pointer
- * @work_info: work request info
- * @iw_vf_idx: virtual function index
- */
-void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_work_info *work_info,
- u32 iw_vf_idx)
-{
- struct virtchnl_work *work;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- work = &iwdev->virtchnl_w[iw_vf_idx];
- memcpy(&work->work_info, work_info, sizeof(*work_info));
- INIT_WORK(&work->work, i40iw_cqp_generic_worker);
- queue_work(iwdev->virtchnl_wq, &work->work);
-}
-
-/**
- * i40iw_cqp_manage_hmc_fcn_worker - worker for hmc function callback
- * @work: work pointer for hmc info
- */
-static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
-{
- struct i40iw_cqp_request *cqp_request =
- ((struct virtchnl_work *)work)->cqp_request;
- struct i40iw_ccq_cqe_info ccq_cqe_info;
- struct i40iw_hmc_fcn_info *hmcfcninfo =
- &cqp_request->info.in.u.manage_hmc_pm.info;
- struct i40iw_device *iwdev =
- (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;
-
- ccq_cqe_info.cqp = NULL;
- ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
- ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
- ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
- ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
- ccq_cqe_info.scratch = 0;
- ccq_cqe_info.error = cqp_request->compl_info.error;
- hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
- hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
- i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
-}
-
-/**
- * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
- * @cqp_request: cqp request info struct for hmc fun
- * @unused: unused param of callback
- */
-static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
- u32 unused)
-{
- struct virtchnl_work *work;
- struct i40iw_hmc_fcn_info *hmcfcninfo =
- &cqp_request->info.in.u.manage_hmc_pm.info;
- struct i40iw_device *iwdev =
- (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
- back_dev;
-
- if (hmcfcninfo && hmcfcninfo->callback_fcn) {
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
- atomic_inc(&cqp_request->refcount);
- work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
- work->cqp_request = cqp_request;
- INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
- queue_work(iwdev->virtchnl_wq, &work->work);
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
- } else {
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
- }
-}
-
-/**
- * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
- * @dev: hardware control device structure
- * @hmcfcninfo: info for hmc
- */
-enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_hmc_fcn_info *hmcfcninfo)
-{
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
- cqp_info = &cqp_request->info;
- cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
- cqp_request->param = hmcfcninfo;
- memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
- sizeof(*hmcfcninfo));
- cqp_info->in.u.manage_hmc_pm.dev = dev;
- cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Manage HMC fail");
- return status;
-}
-
-/**
- * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
- * @dev: function device struct
- * @values_mem: buffer for fpm
- * @hmc_fn_id: function id for fpm
- */
-enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *values_mem,
- u8 hmc_fn_id)
-{
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
- cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
- cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
- cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
- cqp_info->post_sq = 1;
- cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Query FPM fail");
- return status;
-}
-
-/**
- * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
- * @dev: hardware control device structure
- * @values_mem: buffer with fpm values
- * @hmc_fn_id: function id for fpm
- */
-enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *values_mem,
- u8 hmc_fn_id)
-{
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
- cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
- cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
- cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
- cqp_info->post_sq = 1;
- cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Commit FPM fail");
- return status;
-}
-
-/**
- * i40iw_vf_wait_vchnl_resp - wait for channel msg
- * @dev: function's device struct
- */
-enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
-{
- struct i40iw_device *iwdev = dev->back_dev;
- int timeout_ret;
-
- i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
- __func__, __LINE__, dev, iwdev);
-
- atomic_set(&iwdev->vchnl_msgs, 2);
- timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
- (atomic_read(&iwdev->vchnl_msgs) == 1),
- I40IW_VCHNL_EVENT_TIMEOUT);
- atomic_dec(&iwdev->vchnl_msgs);
- if (!timeout_ret) {
-		i40iw_pr_err("virt channel completion timeout\n");
- atomic_set(&iwdev->vchnl_msgs, 0);
- dev->vchnl_up = false;
- return I40IW_ERR_TIMEOUT;
- }
- wake_up(&dev->vf_reqs);
- return 0;
-}
-
-/**
- * i40iw_cqp_cq_create_cmd - create a cq for the cqp
- * @dev: device pointer
- * @cq: pointer to created cq
- */
-enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_sc_cq *cq)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_CQ_CREATE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.cq_create.cq = cq;
- cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
-		i40iw_pr_err("CQP-OP Create CQ fail");
-
- return status;
-}
-
-/**
- * i40iw_cqp_qp_create_cmd - create a qp for the cqp
- * @dev: device pointer
- * @qp: pointer to created qp
- */
-enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
- struct i40iw_sc_qp *qp)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_create_qp_info *qp_info;
- enum i40iw_status_code status;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request)
- return I40IW_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- qp_info = &cqp_request->info.in.u.qp_create.info;
-
- memset(qp_info, 0, sizeof(*qp_info));
-
- qp_info->cq_num_valid = true;
- qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
-
- cqp_info->cqp_cmd = OP_QP_CREATE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_create.qp = qp;
- cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP QP create fail");
- return status;
-}
-
-/**
- * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
- * @dev: device pointer
- * @cq: pointer to cq
- */
-void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- i40iw_cq_wq_destroy(iwdev, cq);
-}
-
-/**
- * i40iw_cqp_qp_destroy_cmd - destroy a qp via the cqp
- * @dev: device pointer
- * @qp: pointer to qp
- */
-void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- enum i40iw_status_code status;
-
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
-
- cqp_info->cqp_cmd = OP_QP_DESTROY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_destroy.qp = qp;
- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP QP_DESTROY fail");
-}
-
-/**
- * i40iw_ieq_mpa_crc_ae - generate AE for crc error
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
- struct i40iw_gen_ae_info info;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
- info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
- info.ae_source = I40IW_AE_SOURCE_RQ;
- i40iw_gen_ae(iwdev, qp, &info, false);
-}
-
-/**
- * i40iw_init_hash_desc - initialize hash for crc calculation
- * @desc: shash descriptor to initialize for crc32c
- */
-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
-{
- struct crypto_shash *tfm;
- struct shash_desc *tdesc;
-
- tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return I40IW_ERR_MPA_CRC;
-
- tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!tdesc) {
- crypto_free_shash(tfm);
- return I40IW_ERR_MPA_CRC;
- }
- tdesc->tfm = tfm;
- *desc = tdesc;
-
- return 0;
-}
-
-/**
- * i40iw_free_hash_desc - free hash desc
- * @desc: to be freed
- */
-void i40iw_free_hash_desc(struct shash_desc *desc)
-{
- if (desc) {
- crypto_free_shash(desc->tfm);
- kfree(desc);
- }
-}
-
-/**
- * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
- * @dev: hardware control device structure
- * @mem: buffer ptr for fpm to be allocated
- * @return: memory allocation status
- */
-enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
- struct i40iw_dma_mem *mem)
-{
- enum i40iw_status_code status;
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-
- status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
- return status;
-}
-
-/**
- * i40iw_ieq_check_mpacrc - check if mpa crc is OK
- * @desc: desc for hash
- * @addr: address of buffer for crc
- * @length: length of buffer
- * @value: value to be compared
- */
-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
- void *addr,
- u32 length,
- u32 value)
-{
- u32 crc = 0;
- int ret;
- enum i40iw_status_code ret_code = 0;
-
- crypto_shash_init(desc);
- ret = crypto_shash_update(desc, addr, length);
- if (!ret)
- crypto_shash_final(desc, (u8 *)&crc);
- if (crc != value) {
- i40iw_pr_err("mpa crc check fail\n");
- ret_code = I40IW_ERR_MPA_CRC;
- }
- return ret_code;
-}
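
The three shash helpers compose into one verify flow: allocate the crc32c descriptor, run it over a received MPA payload against the carried CRC, and free it on teardown. A hypothetical one-shot sketch:

/* Hypothetical: one-shot MPA CRC verification. */
static enum i40iw_status_code example_verify_mpa_crc(void *addr, u32 len,
						     u32 expected_crc)
{
	struct shash_desc *desc;
	enum i40iw_status_code status;

	if (i40iw_init_hash_desc(&desc))
		return I40IW_ERR_MPA_CRC;

	status = i40iw_ieq_check_mpacrc(desc, addr, len, expected_crc);
	i40iw_free_hash_desc(desc);
	return status;
}
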
-
-/**
- * i40iw_ieq_get_qp - get qp based on quad in puda buffer
- * @dev: hardware control device structure
- * @buf: receive puda buffer on exception q
- */
-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
- struct i40iw_puda_buf *buf)
-{
- struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
- struct i40iw_qp *iwqp;
- struct i40iw_cm_node *cm_node;
- u32 loc_addr[4], rem_addr[4];
- u16 loc_port, rem_port;
- struct ipv6hdr *ip6h;
- struct iphdr *iph = (struct iphdr *)buf->iph;
- struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
-
- if (iph->version == 4) {
- memset(loc_addr, 0, sizeof(loc_addr));
- loc_addr[0] = ntohl(iph->daddr);
- memset(rem_addr, 0, sizeof(rem_addr));
- rem_addr[0] = ntohl(iph->saddr);
- } else {
- ip6h = (struct ipv6hdr *)buf->iph;
- i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
- i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
- }
- loc_port = ntohs(tcph->dest);
- rem_port = ntohs(tcph->source);
-
- cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
- loc_addr, false, true);
- if (!cm_node)
- return NULL;
- iwqp = cm_node->iwqp;
- return &iwqp->sc_qp;
-}
-
-/**
- * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
- * @buf: puda to update
- * @length: length of buffer
- * @seqnum: seq number for tcp
- */
-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
-{
- struct tcphdr *tcph;
- struct iphdr *iph;
- u16 iphlen;
- u16 packetsize;
- u8 *addr = (u8 *)buf->mem.va;
-
- iphlen = (buf->ipv4) ? 20 : 40;
- iph = (struct iphdr *)(addr + buf->maclen);
- tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
- packetsize = length + buf->tcphlen + iphlen;
-
- iph->tot_len = htons(packetsize);
- tcph->seq = htonl(seqnum);
-}
-
-/**
- * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
- * @info: to get information
- * @buf: puda buffer
- */
-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
- struct i40iw_puda_buf *buf)
-{
- struct iphdr *iph;
- struct ipv6hdr *ip6h;
- struct tcphdr *tcph;
- u16 iphlen;
- u16 pkt_len;
- u8 *mem = (u8 *)buf->mem.va;
- struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;
-
-	if (ethh->h_proto == htons(ETH_P_8021Q)) {
- info->vlan_valid = true;
- buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
- }
- buf->maclen = (info->vlan_valid) ? 18 : 14;
- iphlen = (info->l3proto) ? 40 : 20;
-	buf->ipv4 = !info->l3proto;
- buf->iph = mem + buf->maclen;
- iph = (struct iphdr *)buf->iph;
-
- buf->tcph = buf->iph + iphlen;
- tcph = (struct tcphdr *)buf->tcph;
-
- if (buf->ipv4) {
- pkt_len = ntohs(iph->tot_len);
- } else {
- ip6h = (struct ipv6hdr *)buf->iph;
- pkt_len = ntohs(ip6h->payload_len) + iphlen;
- }
-
- buf->totallen = pkt_len + buf->maclen;
-
- if (info->payload_len < buf->totallen) {
-		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
- info->payload_len, buf->totallen);
- return I40IW_ERR_INVALID_SIZE;
- }
-
- buf->tcphlen = (tcph->doff) << 2;
- buf->datalen = pkt_len - iphlen - buf->tcphlen;
- buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
- buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
- buf->seqnum = ntohl(tcph->seq);
- return 0;
-}
-
-/**
- * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @t: Timer context containing pointer to the vsi structure
- */
-static void i40iw_hw_stats_timeout(struct timer_list *t)
-{
- struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
- stats_timer);
- struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
- struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
- struct i40iw_vsi_pestat *vf_devstat = NULL;
- u16 iw_vf_idx;
- unsigned long flags;
-
- /*PF*/
- i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);
-
- for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
- spin_lock_irqsave(&pf_devstat->lock, flags);
-		if (pf_dev->vf_dev[iw_vf_idx] &&
-		    pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
-			vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
-			i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
-		}
- spin_unlock_irqrestore(&pf_devstat->lock, flags);
- }
-
- mod_timer(&pf_devstat->stats_timer,
- jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
-}
-
-/**
- * i40iw_hw_stats_start_timer - Start periodic stats timer
- * @vsi: pointer to the vsi structure
- */
-void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
-{
- struct i40iw_vsi_pestat *devstat = vsi->pestat;
-
- timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
- mod_timer(&devstat->stats_timer,
- jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
-}
-
-/**
- * i40iw_hw_stats_stop_timer - Delete periodic stats timer
- * @vsi: pointer to the vsi structure
- */
-void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
-{
- struct i40iw_vsi_pestat *devstat = vsi->pestat;
-
- del_timer_sync(&devstat->stats_timer);
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
deleted file mode 100644
index b876d722fcc8..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ /dev/null
@@ -1,2652 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/random.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-#include <linux/hugetlb.h>
-#include <linux/irq.h>
-#include <asm/byteorder.h>
-#include <net/ip.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_umem.h>
-#include <rdma/uverbs_ioctl.h>
-#include "i40iw.h"
-
-/**
- * i40iw_query_device - get device attributes
- * @ibdev: device pointer from stack
- * @props: returning device attributes
- * @udata: user data
- */
-static int i40iw_query_device(struct ib_device *ibdev,
- struct ib_device_attr *props,
- struct ib_udata *udata)
-{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
-
- if (udata->inlen || udata->outlen)
- return -EINVAL;
- memset(props, 0, sizeof(*props));
- ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
- props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
- i40iw_fw_minor_ver(&iwdev->sc_dev);
- props->device_cap_flags = iwdev->device_cap_flags;
- props->vendor_id = iwdev->ldev->pcidev->vendor;
- props->vendor_part_id = iwdev->ldev->pcidev->device;
- props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
- props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
- props->max_qp = iwdev->max_qp - iwdev->used_qps;
- props->max_qp_wr = I40IW_MAX_QP_WRS;
- props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- props->max_cq = iwdev->max_cq - iwdev->used_cqs;
- props->max_cqe = iwdev->max_cqe;
- props->max_mr = iwdev->max_mr - iwdev->used_mrs;
- props->max_pd = iwdev->max_pd - iwdev->used_pds;
- props->max_sge_rd = I40IW_MAX_SGE_RD;
- props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
- props->max_qp_init_rd_atom = props->max_qp_rd_atom;
- props->atomic_cap = IB_ATOMIC_NONE;
- props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
- return 0;
-}
-
-/**
- * i40iw_query_port - get port attributes
- * @ibdev: device pointer from stack
- * @port: port number for query
- * @props: returning port attributes
- */
-static int i40iw_query_port(struct ib_device *ibdev,
- u32 port,
- struct ib_port_attr *props)
-{
- props->lid = 1;
- props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->active_width = IB_WIDTH_4X;
- props->active_speed = 1;
- props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
- return 0;
-}
-
-/**
- * i40iw_alloc_ucontext - Allocate the user context data structure
- * @uctx: Uverbs context pointer from stack
- * @udata: user data
- *
- * This keeps track of all objects associated with a particular
- * user-mode client.
- */
-static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = uctx->device;
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct i40iw_alloc_ucontext_req req;
- struct i40iw_alloc_ucontext_resp uresp = {};
- struct i40iw_ucontext *ucontext = to_ucontext(uctx);
-
- if (ib_copy_from_udata(&req, udata, sizeof(req)))
- return -EINVAL;
-
- if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
- i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
- return -EINVAL;
- }
-
- uresp.max_qps = iwdev->max_qp;
- uresp.max_pds = iwdev->max_pd;
- uresp.wq_size = iwdev->max_qp_wr * 2;
- uresp.kernel_ver = req.userspace_ver;
-
- ucontext->iwdev = iwdev;
- ucontext->abi_ver = req.userspace_ver;
-
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
- return -EFAULT;
-
- INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
- spin_lock_init(&ucontext->cq_reg_mem_list_lock);
- INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
- spin_lock_init(&ucontext->qp_reg_mem_list_lock);
-
- return 0;
-}
-
-/**
- * i40iw_dealloc_ucontext - deallocate the user context data structure
- * @context: user context created during alloc
- */
-static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
-{
-}
-
-/**
- * i40iw_mmap - user memory map
- * @context: context created during alloc
- * @vma: kernel info for user memory map
- */
-static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- struct i40iw_ucontext *ucontext = to_ucontext(context);
- u64 dbaddr;
-
- if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
-
- return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot), NULL);
-}
-
-/**
- * i40iw_alloc_pd - allocate protection domain
- * @pd: PD pointer
- * @udata: user data
- */
-static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct i40iw_pd *iwpd = to_iwpd(pd);
- struct i40iw_device *iwdev = to_iwdev(pd->device);
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_alloc_pd_resp uresp;
- struct i40iw_sc_pd *sc_pd;
- u32 pd_id = 0;
- int err;
-
- if (iwdev->closing)
- return -ENODEV;
-
- err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
- iwdev->max_pd, &pd_id, &iwdev->next_pd);
- if (err) {
- i40iw_pr_err("alloc resource failed\n");
- return err;
- }
-
- sc_pd = &iwpd->sc_pd;
-
- if (udata) {
- struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct i40iw_ucontext, ibucontext);
- dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
- memset(&uresp, 0, sizeof(uresp));
- uresp.pd_id = pd_id;
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- err = -EFAULT;
- goto error;
- }
- } else {
- dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
- }
-
- i40iw_add_pdusecount(iwpd);
- return 0;
-
-error:
- i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
- return err;
-}
-
-/**
- * i40iw_dealloc_pd - deallocate pd
- * @ibpd: ptr of pd to be deallocated
- * @udata: user data or null for kernel object
- */
-static int i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
-{
- struct i40iw_pd *iwpd = to_iwpd(ibpd);
- struct i40iw_device *iwdev = to_iwdev(ibpd->device);
-
- i40iw_rem_pdusecount(iwpd, iwdev);
- return 0;
-}
-
-/**
- * i40iw_get_pbl - Retrieve pbl from a list given a virtual address
- * @va: user virtual address
- * @pbl_list: pbl list to search in (QP's or CQ's)
- */
-static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
- struct list_head *pbl_list)
-{
- struct i40iw_pbl *iwpbl;
-
- list_for_each_entry(iwpbl, pbl_list, list) {
- if (iwpbl->user_base == va) {
- iwpbl->on_list = false;
- list_del(&iwpbl->list);
- return iwpbl;
- }
- }
- return NULL;
-}
-
-/**
- * i40iw_free_qp_resources - free up memory resources for qp
- * @iwqp: qp ptr (user or kernel)
- */
-void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
-{
- struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
- struct i40iw_device *iwdev = iwqp->iwdev;
- u32 qp_num = iwqp->ibqp.qp_num;
-
- i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
- if (qp_num)
- i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
- if (iwpbl->pbl_allocated)
- i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
- i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
- i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
- kfree(iwqp->kqp.wrid_mem);
- iwqp->kqp.wrid_mem = NULL;
- kfree(iwqp);
-}
-
-/**
- * i40iw_clean_cqes - clean cq entries for qp
- * @iwqp: qp ptr (user or kernel)
- * @iwcq: cq ptr
- */
-static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
-{
- struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
-
- ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
-}
-
-/**
- * i40iw_destroy_qp - destroy qp
- * @ibqp: ib qp pointer, also used to reach the device's qp address
- * @udata: user data
- */
-static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
-{
- struct i40iw_qp *iwqp = to_iwqp(ibqp);
- struct ib_qp_attr attr;
- struct i40iw_device *iwdev = iwqp->iwdev;
-
- memset(&attr, 0, sizeof(attr));
-
- iwqp->destroyed = 1;
-
- if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
- i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
-
- if (!iwqp->user_mode) {
- if (iwqp->iwscq) {
- i40iw_clean_cqes(iwqp, iwqp->iwscq);
- if (iwqp->iwrcq != iwqp->iwscq)
- i40iw_clean_cqes(iwqp, iwqp->iwrcq);
- }
- }
-
- attr.qp_state = IB_QPS_ERR;
- i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- i40iw_qp_rem_ref(&iwqp->ibqp);
- wait_for_completion(&iwqp->free_qp);
- i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
- i40iw_free_qp_resources(iwqp);
- i40iw_rem_devusecount(iwdev);
-
- return 0;
-}
-
-/**
- * i40iw_setup_virt_qp - setup for allocation of virtual qp
- * @iwdev: iwarp device
- * @iwqp: qp ptr
- * @init_info: initialize info to return
- */
-static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
- struct i40iw_qp *iwqp,
- struct i40iw_qp_init_info *init_info)
-{
- struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
- struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
-
- iwqp->page = qpmr->sq_page;
- init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
- if (iwpbl->pbl_allocated) {
- init_info->virtual_map = true;
- init_info->sq_pa = qpmr->sq_pbl.idx;
- init_info->rq_pa = qpmr->rq_pbl.idx;
- } else {
- init_info->sq_pa = qpmr->sq_pbl.addr;
- init_info->rq_pa = qpmr->rq_pbl.addr;
- }
- return 0;
-}
-
-/**
- * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
- * @iwdev: iwarp device
- * @iwqp: qp ptr (user or kernel)
- * @info: initialize info to return
- */
-static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
- struct i40iw_qp *iwqp,
- struct i40iw_qp_init_info *info)
-{
- struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
- u32 sqdepth, rqdepth;
- u8 sqshift;
- u32 size;
- enum i40iw_status_code status;
- struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
-
- i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
- status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
- if (status)
- return -ENOMEM;
-
- status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
- if (status)
- return -ENOMEM;
-
- size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
- iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
-
- ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
- if (!ukinfo->sq_wrtrk_array)
- return -ENOMEM;
-
- ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
-
- size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
- size += (I40IW_SHADOW_AREA_SIZE << 3);
-
- status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
- if (status) {
- kfree(ukinfo->sq_wrtrk_array);
- ukinfo->sq_wrtrk_array = NULL;
- return -ENOMEM;
- }
-
- ukinfo->sq = mem->va;
- info->sq_pa = mem->pa;
-
- ukinfo->rq = &ukinfo->sq[sqdepth];
- info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
-
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
- info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
-
- ukinfo->sq_size = sqdepth >> sqshift;
- ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
- ukinfo->qp_id = iwqp->ibqp.qp_num;
- return 0;
-}
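
i40iw_setup_kmode_qp() carves a single contiguous DMA region into three back-to-back pieces; a sketch of the resulting layout (offsets restated from the assignments above, not taken from the source file):

/*
 * kqp.dma_mem layout after i40iw_setup_kmode_qp():
 *
 *   sq_pa          = mem.pa
 *   rq_pa          = sq_pa + sqdepth * I40IW_QP_WQE_MIN_SIZE
 *   shadow_area_pa = rq_pa + rqdepth * I40IW_QP_WQE_MIN_SIZE
 *                    (shadow area is I40IW_SHADOW_AREA_SIZE << 3 bytes)
 */
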
-
-/**
- * i40iw_create_qp - create qp
- * @ibpd: ptr of pd
- * @init_attr: attributes for qp
- * @udata: user data for create qp
- */
-static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct i40iw_pd *iwpd = to_iwpd(ibpd);
- struct i40iw_device *iwdev = to_iwdev(ibpd->device);
- struct i40iw_cqp *iwcqp = &iwdev->cqp;
- struct i40iw_qp *iwqp;
- struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct i40iw_ucontext, ibucontext);
- struct i40iw_create_qp_req req;
- struct i40iw_create_qp_resp uresp;
- u32 qp_num = 0;
- enum i40iw_status_code ret;
- int err_code;
- int sq_size;
- int rq_size;
- struct i40iw_sc_qp *qp;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_qp_init_info init_info;
- struct i40iw_create_qp_info *qp_info;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- struct i40iw_qp_host_ctx_info *ctx_info;
- struct i40iwarp_offload_info *iwarp_info;
- unsigned long flags;
-
- if (iwdev->closing)
- return ERR_PTR(-ENODEV);
-
- if (init_attr->create_flags)
- return ERR_PTR(-EOPNOTSUPP);
- if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
- init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
-
- if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
- init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
-
- if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
- init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
-
- memset(&init_info, 0, sizeof(init_info));
-
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
-
- init_info.vsi = &iwdev->vsi;
- init_info.qp_uk_init_info.sq_size = sq_size;
- init_info.qp_uk_init_info.rq_size = rq_size;
- init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
- init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
- init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
-
- iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
- if (!iwqp)
- return ERR_PTR(-ENOMEM);
-
- qp = &iwqp->sc_qp;
- qp->back_qp = (void *)iwqp;
- iwqp->iwdev = iwdev;
- iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
-
- if (i40iw_allocate_dma_mem(dev->hw,
- &iwqp->q2_ctx_mem,
- I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
- 256)) {
- i40iw_pr_err("dma_mem failed\n");
- err_code = -ENOMEM;
- goto error;
- }
-
- init_info.q2 = iwqp->q2_ctx_mem.va;
- init_info.q2_pa = iwqp->q2_ctx_mem.pa;
-
- init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
- init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
-
- err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
- &qp_num, &iwdev->next_qp);
- if (err_code) {
- i40iw_pr_err("qp resource\n");
- goto error;
- }
-
- iwqp->iwpd = iwpd;
- iwqp->ibqp.qp_num = qp_num;
- qp = &iwqp->sc_qp;
- iwqp->iwscq = to_iwcq(init_attr->send_cq);
- iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
-
- iwqp->host_ctx.va = init_info.host_ctx;
- iwqp->host_ctx.pa = init_info.host_ctx_pa;
- iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
-
- init_info.pd = &iwpd->sc_pd;
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
- iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
-
- if (init_attr->qp_type != IB_QPT_RC) {
- err_code = -EOPNOTSUPP;
- goto error;
- }
- if (udata) {
- err_code = ib_copy_from_udata(&req, udata, sizeof(req));
- if (err_code) {
-			i40iw_pr_err("ib_copy_from_udata failed\n");
- goto error;
- }
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
- iwqp->user_mode = 1;
-
- if (req.user_wqe_buffers) {
- struct i40iw_pbl *iwpbl;
-
- spin_lock_irqsave(
- &ucontext->qp_reg_mem_list_lock, flags);
- iwpbl = i40iw_get_pbl(
- (unsigned long)req.user_wqe_buffers,
- &ucontext->qp_reg_mem_list);
- spin_unlock_irqrestore(
- &ucontext->qp_reg_mem_list_lock, flags);
-
- if (!iwpbl) {
- err_code = -ENODATA;
- i40iw_pr_err("no pbl info\n");
- goto error;
- }
- memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
- }
- err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
- } else {
- err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
- }
-
- if (err_code) {
- i40iw_pr_err("setup qp failed\n");
- goto error;
- }
-
- init_info.type = I40IW_QP_TYPE_IWARP;
- ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
- if (ret) {
- err_code = -EPROTO;
- i40iw_pr_err("qp_init fail\n");
- goto error;
- }
- ctx_info = &iwqp->ctx_info;
- iwarp_info = &iwqp->iwarp_info;
- iwarp_info->rd_enable = true;
- iwarp_info->wr_rdresp_en = true;
- if (!iwqp->user_mode) {
- iwarp_info->fast_reg_en = true;
- iwarp_info->priv_mode_en = true;
- }
- iwarp_info->ddp_ver = 1;
- iwarp_info->rdmap_ver = 1;
-
- ctx_info->iwarp_info_valid = true;
- ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
- ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
- (u64 *)iwqp->host_ctx.va,
- ctx_info);
- ctx_info->iwarp_info_valid = false;
- cqp_request = i40iw_get_cqp_request(iwcqp, true);
- if (!cqp_request) {
- err_code = -ENOMEM;
- goto error;
- }
- cqp_info = &cqp_request->info;
- qp_info = &cqp_request->info.in.u.qp_create.info;
-
- memset(qp_info, 0, sizeof(*qp_info));
-
- qp_info->cq_num_valid = true;
- qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
-
- cqp_info->cqp_cmd = OP_QP_CREATE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_create.qp = qp;
- cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
- ret = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (ret) {
- i40iw_pr_err("CQP-OP QP create fail");
- err_code = -EACCES;
- goto error;
- }
-
- refcount_set(&iwqp->refcount, 1);
- spin_lock_init(&iwqp->lock);
- iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
- iwdev->qp_table[qp_num] = iwqp;
- i40iw_add_pdusecount(iwqp->iwpd);
- i40iw_add_devusecount(iwdev);
- if (udata) {
- memset(&uresp, 0, sizeof(uresp));
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
- uresp.qp_id = qp_num;
- uresp.push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
- err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (err_code) {
- i40iw_pr_err("copy_to_udata failed\n");
- i40iw_destroy_qp(&iwqp->ibqp, udata);
- /* let the completion of the qp destroy free the qp */
- return ERR_PTR(err_code);
- }
- }
- init_completion(&iwqp->sq_drained);
- init_completion(&iwqp->rq_drained);
- init_completion(&iwqp->free_qp);
-
- return &iwqp->ibqp;
-error:
- i40iw_free_qp_resources(iwqp);
- return ERR_PTR(err_code);
-}
-
-/**
- * i40iw_query_qp - query qp attributes
- * @ibqp: qp pointer
- * @attr: attributes pointer
- * @attr_mask: Not used
- * @init_attr: qp attributes to return
- */
-static int i40iw_query_qp(struct ib_qp *ibqp,
- struct ib_qp_attr *attr,
- int attr_mask,
- struct ib_qp_init_attr *init_attr)
-{
- struct i40iw_qp *iwqp = to_iwqp(ibqp);
- struct i40iw_sc_qp *qp = &iwqp->sc_qp;
-
- attr->qp_state = iwqp->ibqp_state;
- attr->cur_qp_state = attr->qp_state;
- attr->qp_access_flags = 0;
- attr->cap.max_send_wr = qp->qp_uk.sq_size;
- attr->cap.max_recv_wr = qp->qp_uk.rq_size;
- attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
- attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- attr->port_num = 1;
- init_attr->event_handler = iwqp->ibqp.event_handler;
- init_attr->qp_context = iwqp->ibqp.qp_context;
- init_attr->send_cq = iwqp->ibqp.send_cq;
- init_attr->recv_cq = iwqp->ibqp.recv_cq;
- init_attr->srq = iwqp->ibqp.srq;
- init_attr->cap = attr->cap;
- init_attr->port_num = 1;
- return 0;
-}
-
-/**
- * i40iw_hw_modify_qp - setup cqp for modify qp
- * @iwdev: iwarp device
- * @iwqp: qp ptr (user or kernel)
- * @info: info for modify qp
- * @wait: flag to wait or not for modify qp completion
- */
-void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
- struct i40iw_modify_qp_info *info, bool wait)
-{
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_modify_qp_info *m_info;
- struct i40iw_gen_ae_info ae_info;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
- m_info = &cqp_info->in.u.qp_modify.info;
- memcpy(m_info, info, sizeof(*m_info));
- cqp_info->cqp_cmd = OP_QP_MODIFY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
- cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
- if (!i40iw_handle_cqp_op(iwdev, cqp_request))
- return;
-
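-	/*
-	 * The modify failed; force the connection down with a reset and/or
-	 * a bad-close asynchronous event, depending on the requested state
-	 */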
- switch (m_info->next_iwarp_state) {
- case I40IW_QP_STATE_RTS:
- if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
- i40iw_send_reset(iwqp->cm_node);
- fallthrough;
- case I40IW_QP_STATE_IDLE:
- case I40IW_QP_STATE_TERMINATE:
- case I40IW_QP_STATE_CLOSING:
- ae_info.ae_code = I40IW_AE_BAD_CLOSE;
- ae_info.ae_source = 0;
- i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
- break;
- case I40IW_QP_STATE_ERROR:
- default:
- break;
- }
-}
-
-/**
- * i40iw_modify_qp - modify qp request
- * @ibqp: qp's pointer for modify
- * @attr: access attributes
- * @attr_mask: state mask
- * @udata: user data
- */
-int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct i40iw_qp *iwqp = to_iwqp(ibqp);
- struct i40iw_device *iwdev = iwqp->iwdev;
- struct i40iw_qp_host_ctx_info *ctx_info;
- struct i40iwarp_offload_info *iwarp_info;
- struct i40iw_modify_qp_info info;
- u8 issue_modify_qp = 0;
- u8 dont_wait = 0;
- u32 err;
- unsigned long flags;
-
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
-
- memset(&info, 0, sizeof(info));
- ctx_info = &iwqp->ctx_info;
- iwarp_info = &iwqp->iwarp_info;
-
- spin_lock_irqsave(&iwqp->lock, flags);
-
- if (attr_mask & IB_QP_STATE) {
- if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
- err = -EINVAL;
- goto exit;
- }
-
- switch (attr->qp_state) {
- case IB_QPS_INIT:
- case IB_QPS_RTR:
- if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
- err = -EINVAL;
- goto exit;
- }
- if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
- info.next_iwarp_state = I40IW_QP_STATE_IDLE;
- issue_modify_qp = 1;
- }
- break;
- case IB_QPS_RTS:
- if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
- (!iwqp->cm_id)) {
- err = -EINVAL;
- goto exit;
- }
-
- issue_modify_qp = 1;
- iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
- iwqp->hte_added = 1;
- info.next_iwarp_state = I40IW_QP_STATE_RTS;
- info.tcp_ctx_valid = true;
- info.ord_valid = true;
- info.arp_cache_idx_valid = true;
- info.cq_num_valid = true;
- break;
- case IB_QPS_SQD:
- if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
- err = 0;
- goto exit;
- }
- if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
- (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
- err = 0;
- goto exit;
- }
- if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
- err = -EINVAL;
- goto exit;
- }
- info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
- issue_modify_qp = 1;
- break;
- case IB_QPS_SQE:
- if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
- err = -EINVAL;
- goto exit;
- }
- info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
- issue_modify_qp = 1;
- break;
- case IB_QPS_ERR:
- case IB_QPS_RESET:
- if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
- err = -EINVAL;
- goto exit;
- }
- if (iwqp->sc_qp.term_flags)
- i40iw_terminate_del_timer(&iwqp->sc_qp);
- info.next_iwarp_state = I40IW_QP_STATE_ERROR;
- if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
- iwdev->iw_status &&
- (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
- info.reset_tcp_conn = true;
- else
- dont_wait = 1;
- issue_modify_qp = 1;
- info.next_iwarp_state = I40IW_QP_STATE_ERROR;
- break;
- default:
- err = -EINVAL;
- goto exit;
- }
-
- iwqp->ibqp_state = attr->qp_state;
-
- }
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- ctx_info->iwarp_info_valid = true;
- if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
- iwarp_info->wr_rdresp_en = true;
- if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
- iwarp_info->wr_rdresp_en = true;
- if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
- iwarp_info->rd_enable = true;
- if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
- iwarp_info->bind_en = true;
-
- if (iwqp->user_mode) {
- iwarp_info->rd_enable = true;
- iwarp_info->wr_rdresp_en = true;
- iwarp_info->priv_mode_en = false;
- }
- }
-
- if (ctx_info->iwarp_info_valid) {
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- int ret;
-
- ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
- ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
- (u64 *)iwqp->host_ctx.va,
- ctx_info);
- if (ret) {
-			i40iw_pr_err("setting QP context failed\n");
- err = -EINVAL;
- goto exit;
- }
- }
-
- spin_unlock_irqrestore(&iwqp->lock, flags);
-
- if (issue_modify_qp) {
- i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
-
- spin_lock_irqsave(&iwqp->lock, flags);
- iwqp->iwarp_state = info.next_iwarp_state;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- }
-
- if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
- if (dont_wait) {
- if (iwqp->cm_id && iwqp->hw_tcp_state) {
- spin_lock_irqsave(&iwqp->lock, flags);
- iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
- iwqp->last_aeq = I40IW_AE_RESET_SENT;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- i40iw_cm_disconn(iwqp);
- }
- } else {
- spin_lock_irqsave(&iwqp->lock, flags);
- if (iwqp->cm_id) {
- if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
- iwqp->cm_id->add_ref(iwqp->cm_id);
- i40iw_schedule_cm_timer(iwqp->cm_node,
- (struct i40iw_puda_buf *)iwqp,
- I40IW_TIMER_TYPE_CLOSE, 1, 0);
- }
- }
- spin_unlock_irqrestore(&iwqp->lock, flags);
- }
- }
- return 0;
-exit:
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return err;
-}
-
-/**
- * cq_free_resources - free up resources for cq
- * @iwdev: iwarp device
- * @iwcq: cq ptr
- */
-static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
-{
- struct i40iw_sc_cq *cq = &iwcq->sc_cq;
-
- if (!iwcq->user_mode)
- i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
- i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
-}
-
-/**
- * i40iw_cq_wq_destroy - send cq destroy cqp
- * @iwdev: iwarp device
- * @cq: hardware control cq
- */
-void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
-{
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return;
-
- cqp_info = &cqp_request->info;
-
- cqp_info->cqp_cmd = OP_CQ_DESTROY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.cq_destroy.cq = cq;
- cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
-		i40iw_pr_err("CQP-OP Destroy CQ fail");
-}
-
-/**
- * i40iw_destroy_cq - destroy cq
- * @ib_cq: cq pointer
- * @udata: user data or NULL for kernel object
- */
-static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
-{
- struct i40iw_cq *iwcq;
- struct i40iw_device *iwdev;
- struct i40iw_sc_cq *cq;
-
- iwcq = to_iwcq(ib_cq);
- iwdev = to_iwdev(ib_cq->device);
- cq = &iwcq->sc_cq;
- i40iw_cq_wq_destroy(iwdev, cq);
- cq_free_resources(iwdev, iwcq);
- i40iw_rem_devusecount(iwdev);
- return 0;
-}
-
-/**
- * i40iw_create_cq - create cq
- * @ibcq: CQ allocated
- * @attr: attributes for cq
- * @udata: user data
- */
-static int i40iw_create_cq(struct ib_cq *ibcq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ibcq->device;
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct i40iw_cq *iwcq = to_iwcq(ibcq);
- struct i40iw_pbl *iwpbl;
- u32 cq_num = 0;
- struct i40iw_sc_cq *cq;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_cq_init_info info = {};
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
- unsigned long flags;
- int err_code;
- int entries = attr->cqe;
-
- if (attr->flags)
- return -EOPNOTSUPP;
-
- if (iwdev->closing)
- return -ENODEV;
-
- if (entries > iwdev->max_cqe)
- return -EINVAL;
-
- err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
- iwdev->max_cq, &cq_num,
- &iwdev->next_cq);
- if (err_code)
- return err_code;
-
- cq = &iwcq->sc_cq;
- cq->back_cq = (void *)iwcq;
- spin_lock_init(&iwcq->lock);
-
- info.dev = dev;
- ukinfo->cq_size = max(entries, 4);
- ukinfo->cq_id = cq_num;
- iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
- info.ceqe_mask = 0;
- if (attr->comp_vector < iwdev->ceqs_count)
- info.ceq_id = attr->comp_vector;
- info.ceq_id_valid = true;
- info.ceqe_mask = 1;
- info.type = I40IW_CQ_TYPE_IWARP;
- if (udata) {
- struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct i40iw_ucontext, ibucontext);
- struct i40iw_create_cq_req req;
- struct i40iw_cq_mr *cqmr;
-
- memset(&req, 0, sizeof(req));
- iwcq->user_mode = true;
- if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
- err_code = -EFAULT;
- goto cq_free_resources;
- }
-
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
- &ucontext->cq_reg_mem_list);
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
- if (!iwpbl) {
- err_code = -EPROTO;
- goto cq_free_resources;
- }
-
- iwcq->iwpbl = iwpbl;
- iwcq->cq_mem_size = 0;
- cqmr = &iwpbl->cq_mr;
- info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
- if (iwpbl->pbl_allocated) {
- info.virtual_map = true;
- info.pbl_chunk_size = 1;
- info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
- } else {
- info.cq_base_pa = cqmr->cq_pbl.addr;
- }
- } else {
- /* Kmode allocations */
- int rsize;
- int shadow;
-
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
- rsize = round_up(rsize, 256);
- shadow = I40IW_SHADOW_AREA_SIZE << 3;
- status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
- rsize + shadow, 256);
- if (status) {
- err_code = -ENOMEM;
- goto cq_free_resources;
- }
- ukinfo->cq_base = iwcq->kmem.va;
- info.cq_base_pa = iwcq->kmem.pa;
- info.shadow_area_pa = info.cq_base_pa + rsize;
- ukinfo->shadow_area = iwcq->kmem.va + rsize;
- }
-
- if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
- i40iw_pr_err("init cq fail\n");
- err_code = -EPROTO;
- goto cq_free_resources;
- }
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request) {
- err_code = -ENOMEM;
- goto cq_free_resources;
- }
-
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_CQ_CREATE;
- cqp_info->post_sq = 1;
- cqp_info->in.u.cq_create.cq = cq;
- cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status) {
-		i40iw_pr_err("CQP-OP Create CQ fail");
- err_code = -EPROTO;
- goto cq_free_resources;
- }
-
- if (udata) {
- struct i40iw_create_cq_resp resp;
-
- memset(&resp, 0, sizeof(resp));
- resp.cq_id = info.cq_uk_init_info.cq_id;
- resp.cq_size = info.cq_uk_init_info.cq_size;
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-			i40iw_pr_err("copy to udata failed\n");
- err_code = -EPROTO;
- goto cq_destroy;
- }
- }
-
- i40iw_add_devusecount(iwdev);
- return 0;
-
-cq_destroy:
- i40iw_cq_wq_destroy(iwdev, cq);
-cq_free_resources:
- cq_free_resources(iwdev, iwcq);
- return err_code;
-}
-
-/**
- * i40iw_get_user_access - get hw access from IB access
- * @acc: IB access flags to convert to hw access flags
- */
-static inline u16 i40iw_get_user_access(int acc)
-{
- u16 access = 0;
-
- access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
- access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
- access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
- access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
- return access;
-}
-
-/**
- * i40iw_free_stag - free stag resource
- * @iwdev: iwarp device
- * @stag: stag to free
- */
-static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
-{
- u32 stag_idx;
-
- stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
- i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
- i40iw_rem_devusecount(iwdev);
-}
-
-/**
- * i40iw_create_stag - create random stag
- * @iwdev: iwarp device
- */
-static u32 i40iw_create_stag(struct i40iw_device *iwdev)
-{
- u32 stag = 0;
- u32 stag_index = 0;
- u32 next_stag_index;
- u32 driver_key;
- u32 random;
- u8 consumer_key;
- int ret;
-
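-	/* derive consumer key, driver key and starting index from one random word */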
- get_random_bytes(&random, sizeof(random));
- consumer_key = (u8)random;
-
- driver_key = random & ~iwdev->mr_stagmask;
- next_stag_index = (random & iwdev->mr_stagmask) >> 8;
- next_stag_index %= iwdev->max_mr;
-
- ret = i40iw_alloc_resource(iwdev,
- iwdev->allocated_mrs, iwdev->max_mr,
- &stag_index, &next_stag_index);
- if (!ret) {
- stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
- stag |= driver_key;
- stag += (u32)consumer_key;
- i40iw_add_devusecount(iwdev);
- }
- return stag;
-}
-
-/**
- * i40iw_next_pbl_addr - Get next pbl address
- * @pbl: pointer to a pble
- * @pinfo: info pointer
- * @idx: index
- */
-static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
- struct i40iw_pble_info **pinfo,
- u32 *idx)
-{
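-	/*
-	 * Advance within the current pble chunk; when a level-2 leaf is
-	 * exhausted, continue at the next leaf's address
-	 */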
- *idx += 1;
- if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
- return ++pbl;
- *idx = 0;
- (*pinfo)++;
- return (u64 *)(*pinfo)->addr;
-}
-
-/**
- * i40iw_copy_user_pgaddrs - copy user page addresses into pbles locally
- * @iwmr: iwmr for IB's user page addresses
- * @pbl: pble pointer to save 1 level or 0 level pble
- * @level: indicates level 0, 1 or 2
- */
-static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
- u64 *pbl,
- enum i40iw_pble_level level)
-{
- struct ib_umem *region = iwmr->region;
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- struct i40iw_pble_info *pinfo;
- struct ib_block_iter biter;
- u32 idx = 0;
-
- pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
-
- if (iwmr->type == IW_MEMREG_TYPE_QP)
- iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
-
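-	/* walk the umem in page_size blocks, writing each DMA address to the pble list */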
- rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
- *pbl = rdma_block_iter_dma_address(&biter);
- pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
- }
-}
-
-/**
- * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
- * @arr: lvl1 pbl array
- * @npages: page count
- * @pg_size: page size
- *
- */
-static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
-{
- u32 pg_idx;
-
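-	/* contiguous iff each entry equals the first address plus its page offset */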
- for (pg_idx = 0; pg_idx < npages; pg_idx++) {
- if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
- return false;
- }
- return true;
-}
-
-/**
- * i40iw_check_mr_contiguous - check if MR is physically contiguous
- * @palloc: pbl allocation struct
- * @pg_size: page size
- */
-static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
-{
- struct i40iw_pble_level2 *lvl2 = &palloc->level2;
- struct i40iw_pble_info *leaf = lvl2->leaf;
- u64 *arr = NULL;
- u64 *start_addr = NULL;
- int i;
- bool ret;
-
- if (palloc->level == I40IW_LEVEL_1) {
- arr = (u64 *)palloc->level1.addr;
- ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
- return ret;
- }
-
- start_addr = (u64 *)leaf->addr;
-
- for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
- arr = (u64 *)leaf->addr;
- if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
- return false;
- ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
- if (!ret)
- return false;
- }
-
- return true;
-}
-
-/**
- * i40iw_setup_pbles - copy user pg address to pble's
- * @iwdev: iwarp device
- * @iwmr: mr pointer for this memory registration
- * @use_pbles: flag indicating whether to use pbles
- */
-static int i40iw_setup_pbles(struct i40iw_device *iwdev,
- struct i40iw_mr *iwmr,
- bool use_pbles)
-{
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- struct i40iw_pble_info *pinfo;
- u64 *pbl;
- enum i40iw_status_code status;
- enum i40iw_pble_level level = I40IW_LEVEL_1;
-
- if (use_pbles) {
- mutex_lock(&iwdev->pbl_mutex);
- status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
- mutex_unlock(&iwdev->pbl_mutex);
- if (status)
- return -ENOMEM;
-
- iwpbl->pbl_allocated = true;
- level = palloc->level;
- pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
- pbl = (u64 *)pinfo->addr;
- } else {
- pbl = iwmr->pgaddrmem;
- }
-
- i40iw_copy_user_pgaddrs(iwmr, pbl, level);
-
- if (use_pbles)
- iwmr->pgaddrmem[0] = *pbl;
-
- return 0;
-}
-
-/**
- * i40iw_handle_q_mem - handle memory for qp and cq
- * @iwdev: iwarp device
- * @req: information for q memory management
- * @iwpbl: pble struct
- * @use_pbles: flag to use pble
- */
-static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
- struct i40iw_mem_reg_req *req,
- struct i40iw_pbl *iwpbl,
- bool use_pbles)
-{
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- struct i40iw_mr *iwmr = iwpbl->iwmr;
- struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
- struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
- struct i40iw_hmc_pble *hmc_p;
- u64 *arr = iwmr->pgaddrmem;
- u32 pg_size;
- int err;
- int total;
- bool ret = true;
-
- total = req->sq_pages + req->rq_pages + req->cq_pages;
- pg_size = iwmr->page_size;
-
- err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
- if (err)
- return err;
-
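-	/* Q memory must fit within a single level-1 pble chunk */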
- if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- return -ENOMEM;
- }
-
- if (use_pbles)
- arr = (u64 *)palloc->level1.addr;
-
- if (iwmr->type == IW_MEMREG_TYPE_QP) {
- hmc_p = &qpmr->sq_pbl;
- qpmr->shadow = (dma_addr_t)arr[total];
-
- if (use_pbles) {
- ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
- if (ret)
- ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
- }
-
- if (!ret) {
- hmc_p->idx = palloc->level1.idx;
- hmc_p = &qpmr->rq_pbl;
- hmc_p->idx = palloc->level1.idx + req->sq_pages;
- } else {
- hmc_p->addr = arr[0];
- hmc_p = &qpmr->rq_pbl;
- hmc_p->addr = arr[req->sq_pages];
- }
- } else { /* CQ */
- hmc_p = &cqmr->cq_pbl;
- cqmr->shadow = (dma_addr_t)arr[total];
-
- if (use_pbles)
- ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
-
- if (!ret)
- hmc_p->idx = palloc->level1.idx;
- else
- hmc_p->addr = arr[0];
- }
-
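-	/* a contiguous range is programmed by address, so the pble is not needed */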
- if (use_pbles && ret) {
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
-
- return err;
-}
-
-/**
- * i40iw_hw_alloc_stag - cqp command to allocate stag
- * @iwdev: iwarp device
- * @iwmr: iwarp mr pointer
- */
-static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
-{
- struct i40iw_allocate_stag_info *info;
- struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
- enum i40iw_status_code status;
- int err = 0;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.alloc_stag.info;
- memset(info, 0, sizeof(*info));
- info->page_size = PAGE_SIZE;
- info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
- info->pd_id = iwpd->sc_pd.pd_id;
- info->total_len = iwmr->length;
- info->remote_access = true;
- cqp_info->cqp_cmd = OP_ALLOC_STAG;
- cqp_info->post_sq = 1;
- cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
- cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
-
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status) {
- err = -ENOMEM;
-		i40iw_pr_err("CQP-OP Alloc STag fail");
- }
- return err;
-}
-
-/**
- * i40iw_alloc_mr - register stag for fast memory registration
- * @pd: ibpd pointer
- * @mr_type: memory for stag registration
- * @max_num_sg: max number of pages
- */
-static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct i40iw_pd *iwpd = to_iwpd(pd);
- struct i40iw_device *iwdev = to_iwdev(pd->device);
- struct i40iw_pble_alloc *palloc;
- struct i40iw_pbl *iwpbl;
- struct i40iw_mr *iwmr;
- enum i40iw_status_code status;
- u32 stag;
- int err_code = -ENOMEM;
-
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr)
- return ERR_PTR(-ENOMEM);
-
- stag = i40iw_create_stag(iwdev);
- if (!stag) {
- err_code = -EOVERFLOW;
- goto err;
- }
- stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
- iwmr->ibmr.pd = pd;
- iwmr->ibmr.device = pd->device;
- iwpbl = &iwmr->iwpbl;
- iwpbl->iwmr = iwmr;
- iwmr->type = IW_MEMREG_TYPE_MEM;
- palloc = &iwpbl->pble_alloc;
- iwmr->page_cnt = max_num_sg;
- mutex_lock(&iwdev->pbl_mutex);
- status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
- mutex_unlock(&iwdev->pbl_mutex);
- if (status)
- goto err1;
-
- if (palloc->level != I40IW_LEVEL_1)
- goto err2;
- err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
- if (err_code)
- goto err2;
- iwpbl->pbl_allocated = true;
- i40iw_add_pdusecount(iwpd);
- return &iwmr->ibmr;
-err2:
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
-err1:
- i40iw_free_stag(iwdev, stag);
-err:
- kfree(iwmr);
- return ERR_PTR(err_code);
-}
-
-/**
- * i40iw_set_page - populate pbl list for fmr
- * @ibmr: ib mem to access iwarp mr pointer
- * @addr: page dma address for pbl list
- */
-static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct i40iw_mr *iwmr = to_iwmr(ibmr);
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- u64 *pbl;
-
- if (unlikely(iwmr->npages == iwmr->page_cnt))
- return -ENOMEM;
-
- pbl = (u64 *)palloc->level1.addr;
- pbl[iwmr->npages++] = cpu_to_le64(addr);
- return 0;
-}
-
-/**
- * i40iw_map_mr_sg - map of sg list for fmr
- * @ibmr: ib mem to access iwarp mr pointer
- * @sg: scatter gather list for fmr
- * @sg_nents: number of sg pages
- * @sg_offset: scatter gather offset
- */
-static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct i40iw_mr *iwmr = to_iwmr(ibmr);
-
- iwmr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
-}
-
-/**
- * i40iw_drain_sq - drain the send queue
- * @ibqp: ib qp pointer
- */
-static void i40iw_drain_sq(struct ib_qp *ibqp)
-{
- struct i40iw_qp *iwqp = to_iwqp(ibqp);
- struct i40iw_sc_qp *qp = &iwqp->sc_qp;
-
- if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
- wait_for_completion(&iwqp->sq_drained);
-}
-
-/**
- * i40iw_drain_rq - drain the receive queue
- * @ibqp: ib qp pointer
- */
-static void i40iw_drain_rq(struct ib_qp *ibqp)
-{
- struct i40iw_qp *iwqp = to_iwqp(ibqp);
- struct i40iw_sc_qp *qp = &iwqp->sc_qp;
-
- if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
- wait_for_completion(&iwqp->rq_drained);
-}
-
-/**
- * i40iw_hwreg_mr - send cqp command for memory registration
- * @iwdev: iwarp device
- * @iwmr: iwarp mr pointer
- * @access: access for MR
- */
-static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
- struct i40iw_mr *iwmr,
- u16 access)
-{
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- struct i40iw_reg_ns_stag_info *stag_info;
- struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- enum i40iw_status_code status;
- int err = 0;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
- memset(stag_info, 0, sizeof(*stag_info));
- stag_info->va = (void *)(unsigned long)iwpbl->user_base;
- stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
- stag_info->stag_key = (u8)iwmr->stag;
- stag_info->total_len = iwmr->length;
- stag_info->access_rights = access;
- stag_info->pd_id = iwpd->sc_pd.pd_id;
- stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
- stag_info->page_size = iwmr->page_size;
-
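-	/* level-1 PBLs are referenced directly, level-2 PBLs through their root chunk */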
- if (iwpbl->pbl_allocated) {
- if (palloc->level == I40IW_LEVEL_1) {
- stag_info->first_pm_pbl_index = palloc->level1.idx;
- stag_info->chunk_size = 1;
- } else {
- stag_info->first_pm_pbl_index = palloc->level2.root.idx;
- stag_info->chunk_size = 3;
- }
- } else {
- stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
- }
-
- cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
- cqp_info->post_sq = 1;
- cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
- cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
-
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status) {
- err = -ENOMEM;
- i40iw_pr_err("CQP-OP MR Reg fail");
- }
- return err;
-}
-
-/**
- * i40iw_reg_user_mr - Register a user memory region
- * @pd: ptr of pd
- * @start: virtual start address
- * @length: length of mr
- * @virt: virtual address
- * @acc: access of mr
- * @udata: user data
- */
-static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
- u64 start,
- u64 length,
- u64 virt,
- int acc,
- struct ib_udata *udata)
-{
- struct i40iw_pd *iwpd = to_iwpd(pd);
- struct i40iw_device *iwdev = to_iwdev(pd->device);
- struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct i40iw_ucontext, ibucontext);
- struct i40iw_pble_alloc *palloc;
- struct i40iw_pbl *iwpbl;
- struct i40iw_mr *iwmr;
- struct ib_umem *region;
- struct i40iw_mem_reg_req req;
- u32 stag = 0;
- u16 access;
- bool use_pbles = false;
- unsigned long flags;
- int err = -ENOSYS;
- int ret;
-
- if (!udata)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (iwdev->closing)
- return ERR_PTR(-ENODEV);
-
- if (length > I40IW_MAX_MR_SIZE)
- return ERR_PTR(-EINVAL);
- region = ib_umem_get(pd->device, start, length, acc);
- if (IS_ERR(region))
- return (struct ib_mr *)region;
-
- if (ib_copy_from_udata(&req, udata, sizeof(req))) {
- ib_umem_release(region);
- return ERR_PTR(-EFAULT);
- }
-
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr) {
- ib_umem_release(region);
- return ERR_PTR(-ENOMEM);
- }
-
- iwpbl = &iwmr->iwpbl;
- iwpbl->iwmr = iwmr;
- iwmr->region = region;
- iwmr->ibmr.pd = pd;
- iwmr->ibmr.device = pd->device;
-
- iwmr->page_size = PAGE_SIZE;
- if (req.reg_type == IW_MEMREG_TYPE_MEM)
- iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
- virt);
- iwmr->length = region->length;
-
- iwpbl->user_base = virt;
- palloc = &iwpbl->pble_alloc;
-
- iwmr->type = req.reg_type;
- iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
-
- switch (req.reg_type) {
- case IW_MEMREG_TYPE_QP:
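-		/* queues totaling no more than two pages are tracked without a pble */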
- use_pbles = ((req.sq_pages + req.rq_pages) > 2);
- err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
- if (err)
- goto error;
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
- break;
- case IW_MEMREG_TYPE_CQ:
- use_pbles = (req.cq_pages > 1);
- err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
- if (err)
- goto error;
-
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
- break;
- case IW_MEMREG_TYPE_MEM:
- use_pbles = (iwmr->page_cnt != 1);
- access = I40IW_ACCESS_FLAGS_LOCALREAD;
-
- err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
- if (err)
- goto error;
-
- if (use_pbles) {
- ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
- if (ret) {
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
- }
-
- access |= i40iw_get_user_access(acc);
- stag = i40iw_create_stag(iwdev);
- if (!stag) {
- err = -ENOMEM;
- goto error;
- }
-
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
-
- err = i40iw_hwreg_mr(iwdev, iwmr, access);
- if (err) {
- i40iw_free_stag(iwdev, stag);
- goto error;
- }
-
- break;
- default:
- goto error;
- }
-
- iwmr->type = req.reg_type;
- if (req.reg_type == IW_MEMREG_TYPE_MEM)
- i40iw_add_pdusecount(iwpd);
- return &iwmr->ibmr;
-
-error:
- if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- ib_umem_release(region);
- kfree(iwmr);
- return ERR_PTR(err);
-}
-
-/**
- * i40iw_reg_phys_mr - register kernel physical memory
- * @pd: ibpd pointer
- * @addr: physical address of memory to register
- * @size: size of memory to register
- * @acc: Access rights
- * @iova_start: start of virtual address for physical buffers
- */
-struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
- u64 addr,
- u64 size,
- int acc,
- u64 *iova_start)
-{
- struct i40iw_pd *iwpd = to_iwpd(pd);
- struct i40iw_device *iwdev = to_iwdev(pd->device);
- struct i40iw_pbl *iwpbl;
- struct i40iw_mr *iwmr;
- enum i40iw_status_code status;
- u32 stag;
- u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
- int ret;
-
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr)
- return ERR_PTR(-ENOMEM);
- iwmr->ibmr.pd = pd;
- iwmr->ibmr.device = pd->device;
- iwpbl = &iwmr->iwpbl;
- iwpbl->iwmr = iwmr;
- iwmr->type = IW_MEMREG_TYPE_MEM;
- iwpbl->user_base = *iova_start;
- stag = i40iw_create_stag(iwdev);
- if (!stag) {
- ret = -EOVERFLOW;
- goto err;
- }
- access |= i40iw_get_user_access(acc);
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
- iwmr->page_cnt = 1;
- iwmr->pgaddrmem[0] = addr;
- iwmr->length = size;
- status = i40iw_hwreg_mr(iwdev, iwmr, access);
- if (status) {
- i40iw_free_stag(iwdev, stag);
- ret = -ENOMEM;
- goto err;
- }
-
- i40iw_add_pdusecount(iwpd);
- return &iwmr->ibmr;
- err:
- kfree(iwmr);
- return ERR_PTR(ret);
-}
-
-/**
- * i40iw_get_dma_mr - register physical mem
- * @pd: ptr of pd
- * @acc: access for memory
- */
-static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
-{
- u64 kva = 0;
-
- return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
-}
-
-/**
- * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
- * @iwmr: iwmr for IB's user page addresses
- * @ucontext: ptr to user context
- */
-static void i40iw_del_memlist(struct i40iw_mr *iwmr,
- struct i40iw_ucontext *ucontext)
-{
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- unsigned long flags;
-
- switch (iwmr->type) {
- case IW_MEMREG_TYPE_CQ:
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- if (iwpbl->on_list) {
- iwpbl->on_list = false;
- list_del(&iwpbl->list);
- }
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
- break;
- case IW_MEMREG_TYPE_QP:
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- if (iwpbl->on_list) {
- iwpbl->on_list = false;
- list_del(&iwpbl->list);
- }
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
- break;
- default:
- break;
- }
-}
-
-/**
- * i40iw_dereg_mr - deregister mr
- * @ib_mr: mr ptr for dereg
- * @udata: user data
- */
-static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-{
- struct ib_pd *ibpd = ib_mr->pd;
- struct i40iw_pd *iwpd = to_iwpd(ibpd);
- struct i40iw_mr *iwmr = to_iwmr(ib_mr);
- struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
- enum i40iw_status_code status;
- struct i40iw_dealloc_stag_info *info;
- struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
- u32 stag_idx;
-
- ib_umem_release(iwmr->region);
-
- if (iwmr->type != IW_MEMREG_TYPE_MEM) {
-		/* region is already released; non-NULL only indicates a user-mode MR */
- if (iwmr->region) {
- struct i40iw_ucontext *ucontext =
- rdma_udata_to_drv_context(
- udata,
- struct i40iw_ucontext,
- ibucontext);
-
- i40iw_del_memlist(iwmr, ucontext);
- }
- if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- kfree(iwmr);
- return 0;
- }
-
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.dealloc_stag.info;
- memset(info, 0, sizeof(*info));
-
- info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
- info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
- stag_idx = info->stag_idx;
- info->mr = true;
- if (iwpbl->pbl_allocated)
- info->dealloc_pbl = true;
-
- cqp_info->cqp_cmd = OP_DEALLOC_STAG;
- cqp_info->post_sq = 1;
- cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
- cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
- i40iw_rem_pdusecount(iwpd, iwdev);
- i40iw_free_stag(iwdev, iwmr->stag);
- if (iwpbl->pbl_allocated)
- i40iw_free_pble(iwdev->pble_rsrc, palloc);
- kfree(iwmr);
- return 0;
-}
-
-/*
- * hw_rev_show
- */
-static ssize_t hw_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct i40iw_ib_device *iwibdev =
- rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
- u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
-
- return sysfs_emit(buf, "%x\n", hw_rev);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-/*
- * hca_type_show
- */
-static ssize_t hca_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "I40IW\n");
-}
-static DEVICE_ATTR_RO(hca_type);
-
-/*
- * board_id_show
- */
-static ssize_t board_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "%.*s\n", 32, "I40IW Board ID");
-}
-static DEVICE_ATTR_RO(board_id);
-
-static struct attribute *i40iw_dev_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- NULL
-};
-
-static const struct attribute_group i40iw_attr_group = {
- .attrs = i40iw_dev_attributes,
-};
-
-/**
- * i40iw_copy_sg_list - copy sg list for qp
- * @sg_list: destination sg list
- * @sgl: source sg list
- * @num_sges: count of sg entries
- */
-static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
-{
- unsigned int i;
-
- for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
-/**
- * i40iw_post_send - kernel application wr
- * @ibqp: qp ptr for wr
- * @ib_wr: work request ptr
- * @bad_wr: return of bad wr if err
- */
-static int i40iw_post_send(struct ib_qp *ibqp,
- const struct ib_send_wr *ib_wr,
- const struct ib_send_wr **bad_wr)
-{
- struct i40iw_qp *iwqp;
- struct i40iw_qp_uk *ukqp;
- struct i40iw_post_sq_info info;
- enum i40iw_status_code ret;
- int err = 0;
- unsigned long flags;
- bool inv_stag;
-
- iwqp = (struct i40iw_qp *)ibqp;
- ukqp = &iwqp->sc_qp.qp_uk;
-
- spin_lock_irqsave(&iwqp->lock, flags);
-
- if (iwqp->flush_issued) {
- err = -EINVAL;
- goto out;
- }
-
- while (ib_wr) {
- inv_stag = false;
- memset(&info, 0, sizeof(info));
- info.wr_id = (u64)(ib_wr->wr_id);
- if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
- info.signaled = true;
- if (ib_wr->send_flags & IB_SEND_FENCE)
- info.read_fence = true;
-
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (ib_wr->opcode == IB_WR_SEND) {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- info.op_type = I40IW_OP_TYPE_SEND_SOL;
- else
- info.op_type = I40IW_OP_TYPE_SEND;
- } else {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
- else
- info.op_type = I40IW_OP_TYPE_SEND_INV;
- }
-
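-			/* inline sends take the payload straight from the first sge */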
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
- info.op.inline_send.len = ib_wr->sg_list[0].length;
- ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
- } else {
- info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
- ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
- }
-
- if (ret) {
- if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
- }
- break;
- case IB_WR_RDMA_WRITE:
- info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
-
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
- } else {
- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
- }
-
- if (ret) {
- if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
- }
- break;
- case IB_WR_RDMA_READ_WITH_INV:
- inv_stag = true;
- fallthrough;
- case IB_WR_RDMA_READ:
- if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
- err = -EINVAL;
- break;
- }
- info.op_type = I40IW_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
- info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
- info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
- ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
- if (ret) {
- if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
- }
- break;
- case IB_WR_LOCAL_INV:
- info.op_type = I40IW_OP_TYPE_INV_STAG;
- info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
- ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
- if (ret)
- err = -ENOMEM;
- break;
- case IB_WR_REG_MR:
- {
- struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
- int flags = reg_wr(ib_wr)->access;
- struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
- struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
- struct i40iw_fast_reg_stag_info info;
-
- memset(&info, 0, sizeof(info));
- info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
- info.access_rights |= i40iw_get_user_access(flags);
- info.stag_key = reg_wr(ib_wr)->key & 0xff;
- info.stag_idx = reg_wr(ib_wr)->key >> 8;
- info.page_size = reg_wr(ib_wr)->mr->page_size;
- info.wr_id = ib_wr->wr_id;
-
- info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
- info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
- info.total_len = iwmr->ibmr.length;
- info.reg_addr_pa = *(u64 *)palloc->level1.addr;
- info.first_pm_pbl_index = palloc->level1.idx;
- info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
- info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
-
- if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
- info.chunk_size = 1;
-
- ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
- if (ret)
- err = -ENOMEM;
- break;
- }
- default:
- err = -EINVAL;
-			i40iw_pr_err("post_send bad opcode = 0x%x\n",
-				     ib_wr->opcode);
- break;
- }
-
- if (err)
- break;
- ib_wr = ib_wr->next;
- }
-
-out:
- if (err)
- *bad_wr = ib_wr;
- else
- ukqp->ops.iw_qp_post_wr(ukqp);
- spin_unlock_irqrestore(&iwqp->lock, flags);
-
- return err;
-}
-
-/**
- * i40iw_post_recv - post receive wr for kernel application
- * @ibqp: ib qp pointer
- * @ib_wr: work request for receive
- * @bad_wr: bad wr caused an error
- */
-static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct i40iw_qp *iwqp;
- struct i40iw_qp_uk *ukqp;
- struct i40iw_post_rq_info post_recv;
- struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
- enum i40iw_status_code ret = 0;
- unsigned long flags;
- int err = 0;
-
- iwqp = (struct i40iw_qp *)ibqp;
- ukqp = &iwqp->sc_qp.qp_uk;
-
- memset(&post_recv, 0, sizeof(post_recv));
- spin_lock_irqsave(&iwqp->lock, flags);
-
- if (iwqp->flush_issued) {
- err = -EINVAL;
- goto out;
- }
-
- while (ib_wr) {
- post_recv.num_sges = ib_wr->num_sge;
- post_recv.wr_id = ib_wr->wr_id;
- i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
- ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
- if (ret) {
-			i40iw_pr_err("post_recv err %d\n", ret);
- if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
- err = -ENOMEM;
- else
- err = -EINVAL;
- *bad_wr = ib_wr;
- goto out;
- }
- ib_wr = ib_wr->next;
- }
- out:
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return err;
-}
-
-/**
- * i40iw_poll_cq - poll cq for completion (kernel apps)
- * @ibcq: cq to poll
- * @num_entries: number of entries to poll
- * @entry: wr of entry completed
- */
-static int i40iw_poll_cq(struct ib_cq *ibcq,
- int num_entries,
- struct ib_wc *entry)
-{
- struct i40iw_cq *iwcq;
- int cqe_count = 0;
- struct i40iw_cq_poll_info cq_poll_info;
- enum i40iw_status_code ret;
- struct i40iw_cq_uk *ukcq;
- struct i40iw_sc_qp *qp;
- struct i40iw_qp *iwqp;
- unsigned long flags;
-
- iwcq = (struct i40iw_cq *)ibcq;
- ukcq = &iwcq->sc_cq.cq_uk;
-
- spin_lock_irqsave(&iwcq->lock, flags);
- while (cqe_count < num_entries) {
- ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
- if (ret == I40IW_ERR_QUEUE_EMPTY) {
- break;
- } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
- continue;
- } else if (ret) {
- if (!cqe_count)
- cqe_count = -1;
- break;
- }
- entry->wc_flags = 0;
- entry->wr_id = cq_poll_info.wr_id;
- if (cq_poll_info.error) {
- entry->status = IB_WC_WR_FLUSH_ERR;
- entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
- } else {
- entry->status = IB_WC_SUCCESS;
- }
-
- switch (cq_poll_info.op_type) {
- case I40IW_OP_TYPE_RDMA_WRITE:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
- case I40IW_OP_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case I40IW_OP_TYPE_SEND_SOL:
- case I40IW_OP_TYPE_SEND_SOL_INV:
- case I40IW_OP_TYPE_SEND_INV:
- case I40IW_OP_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case I40IW_OP_TYPE_REC:
- entry->opcode = IB_WC_RECV;
- break;
- default:
- entry->opcode = IB_WC_RECV;
- break;
- }
-
- entry->ex.imm_data = 0;
- qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
- entry->qp = (struct ib_qp *)qp->back_qp;
- entry->src_qp = cq_poll_info.qp_id;
- iwqp = (struct i40iw_qp *)qp->back_qp;
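-		/* once past RTS, wake drain waiters when a ring has no more work */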
- if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
- if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
- complete(&iwqp->sq_drained);
- if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
- complete(&iwqp->rq_drained);
- }
- entry->byte_len = cq_poll_info.bytes_xfered;
- entry++;
- cqe_count++;
- }
- spin_unlock_irqrestore(&iwcq->lock, flags);
- return cqe_count;
-}
-
-/**
- * i40iw_req_notify_cq - arm cq kernel application
- * @ibcq: cq to arm
- * @notify_flags: notification flags
- */
-static int i40iw_req_notify_cq(struct ib_cq *ibcq,
- enum ib_cq_notify_flags notify_flags)
-{
- struct i40iw_cq *iwcq;
- struct i40iw_cq_uk *ukcq;
- unsigned long flags;
- enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
-
- iwcq = (struct i40iw_cq *)ibcq;
- ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_SOLICITED)
- cq_notify = IW_CQ_COMPL_SOLICITED;
- spin_lock_irqsave(&iwcq->lock, flags);
- ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
- spin_unlock_irqrestore(&iwcq->lock, flags);
- return 0;
-}
-
-/**
- * i40iw_port_immutable - return port's immutable data
- * @ibdev: ib dev struct
- * @port_num: port number
- * @immutable: immutable data for the port return
- */
-static int i40iw_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- err = ib_query_port(ibdev, port_num, &attr);
-
- if (err)
- return err;
-
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
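-/* 64-bit counter names follow the 32-bit ones, offset by I40IW_HW_STAT_INDEX_MAX_32 */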
-static const char * const i40iw_hw_stat_names[] = {
- // 32bit names
- [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
- [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
- [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
- [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
- [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
- [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
- [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
- [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
- [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
- // 64bit names
- [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4InOctets",
- [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4InPkts",
- [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4InReasmRqd",
- [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4InMcastPkts",
- [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4OutOctets",
- [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4OutPkts",
- [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4OutSegRqd",
- [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip4OutMcastPkts",
- [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6InOctets",
- [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6InPkts",
- [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6InReasmRqd",
- [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6InMcastPkts",
- [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6OutOctets",
- [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6OutPkts",
- [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6OutSegRqd",
- [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
- "ip6OutMcastPkts",
- [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
- "tcpInSegs",
- [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
- "tcpOutSegs",
- [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwInRdmaReads",
- [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwInRdmaSends",
- [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwInRdmaWrites",
- [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwOutRdmaReads",
- [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwOutRdmaSends",
- [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwOutRdmaWrites",
- [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwRdmaBnd",
- [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
- "iwRdmaInv"
-};
-
-static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
-{
- struct i40iw_device *iwdev = to_iwdev(dev);
-
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu",
- i40iw_fw_major_ver(&iwdev->sc_dev),
- i40iw_fw_minor_ver(&iwdev->sc_dev));
-}
-
-/**
- * i40iw_alloc_hw_stats - Allocate a hw stats structure
- * @ibdev: device pointer from stack
- * @port_num: port number
- */
-static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
-{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
- I40IW_HW_STAT_INDEX_MAX_64;
- unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
-
- BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
- (I40IW_HW_STAT_INDEX_MAX_32 +
- I40IW_HW_STAT_INDEX_MAX_64));
-
- /*
- * PFs get the default update lifespan, but VFs only update once
- * per second
- */
- if (!dev->is_pf)
- lifespan = 1000;
- return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
- lifespan);
-}
-
-/**
- * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
- * @ibdev: device pointer from stack
- * @stats: stats pointer from stack
- * @port_num: port number
- * @index: which hw counter the stack is requesting we update
- */
-static int i40iw_get_hw_stats(struct ib_device *ibdev,
- struct rdma_hw_stats *stats,
- u32 port_num, int index)
-{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
- struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
-
- if (dev->is_pf) {
- i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
- } else {
- if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
- return -ENOSYS;
- }
-
- memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
-
- return stats->num_counters;
-}
-
-/**
- * i40iw_query_gid - Query port GID
- * @ibdev: device pointer from stack
- * @port: port number
- * @index: Entry index
- * @gid: Global ID
- */
-static int i40iw_query_gid(struct ib_device *ibdev,
- u32 port,
- int index,
- union ib_gid *gid)
-{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
-
- memset(gid->raw, 0, sizeof(gid->raw));
- ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
- return 0;
-}
-
-static const struct ib_device_ops i40iw_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_I40IW,
- /* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
- .uverbs_abi_ver = I40IW_ABI_VER,
-
- .alloc_hw_stats = i40iw_alloc_hw_stats,
- .alloc_mr = i40iw_alloc_mr,
- .alloc_pd = i40iw_alloc_pd,
- .alloc_ucontext = i40iw_alloc_ucontext,
- .create_cq = i40iw_create_cq,
- .create_qp = i40iw_create_qp,
- .dealloc_pd = i40iw_dealloc_pd,
- .dealloc_ucontext = i40iw_dealloc_ucontext,
- .dereg_mr = i40iw_dereg_mr,
- .destroy_cq = i40iw_destroy_cq,
- .destroy_qp = i40iw_destroy_qp,
- .drain_rq = i40iw_drain_rq,
- .drain_sq = i40iw_drain_sq,
- .get_dev_fw_str = i40iw_get_dev_fw_str,
- .get_dma_mr = i40iw_get_dma_mr,
- .get_hw_stats = i40iw_get_hw_stats,
- .get_port_immutable = i40iw_port_immutable,
- .iw_accept = i40iw_accept,
- .iw_add_ref = i40iw_qp_add_ref,
- .iw_connect = i40iw_connect,
- .iw_create_listen = i40iw_create_listen,
- .iw_destroy_listen = i40iw_destroy_listen,
- .iw_get_qp = i40iw_get_qp,
- .iw_reject = i40iw_reject,
- .iw_rem_ref = i40iw_qp_rem_ref,
- .map_mr_sg = i40iw_map_mr_sg,
- .mmap = i40iw_mmap,
- .modify_qp = i40iw_modify_qp,
- .poll_cq = i40iw_poll_cq,
- .post_recv = i40iw_post_recv,
- .post_send = i40iw_post_send,
- .query_device = i40iw_query_device,
- .query_gid = i40iw_query_gid,
- .query_port = i40iw_query_port,
- .query_qp = i40iw_query_qp,
- .reg_user_mr = i40iw_reg_user_mr,
- .req_notify_cq = i40iw_req_notify_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
- INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
- INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
-};
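Editor's note: the INIT_RDMA_OBJ_SIZE() entries above tell the RDMA core how large the driver's container struct is and where the embedded ib_* member sits, so the core can allocate the whole object and the driver can recover its wrapper with container_of(). A standalone sketch of that pattern, with stand-in types:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct ib_pd_demo { int flags; };                  /* stand-in for ib_pd */
struct drv_pd { struct ib_pd_demo ibpd; int pd_id; };

#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	/* core side: allocate the full driver object, hand out the member */
	struct drv_pd *pd = calloc(1, sizeof(*pd));
	struct ib_pd_demo *ibpd;

	if (!pd)
		return 1;
	ibpd = &pd->ibpd;

	/* driver side: recover the wrapper, as a to_iwpd() helper would */
	assert(container_of_demo(ibpd, struct drv_pd, ibpd) == pd);
	free(pd);
	return 0;
}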
-
-/**
- * i40iw_init_rdma_device - initialization of iwarp device
- * @iwdev: iwarp device
- */
-static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
-{
- struct i40iw_ib_device *iwibdev;
- struct net_device *netdev = iwdev->netdev;
- struct pci_dev *pcidev = iwdev->hw.pcidev;
-
- iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
- if (!iwibdev) {
- i40iw_pr_err("iwdev == NULL\n");
- return NULL;
- }
- iwdev->iwibdev = iwibdev;
- iwibdev->iwdev = iwdev;
-
- iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
- ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
-
- iwibdev->ibdev.phys_port_cnt = 1;
- iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
- iwibdev->ibdev.dev.parent = &pcidev->dev;
- memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
- sizeof(iwibdev->ibdev.iw_ifname));
- ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);
-
- return iwibdev;
-}
-
-/**
- * i40iw_port_ibevent - indicate port event
- * @iwdev: iwarp device
- */
-void i40iw_port_ibevent(struct i40iw_device *iwdev)
-{
- struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
- struct ib_event event;
-
- event.device = &iwibdev->ibdev;
- event.element.port_num = 1;
- event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
- ib_dispatch_event(&event);
-}
-
-/**
- * i40iw_destroy_rdma_device - destroy rdma device and free resources
- * @iwibdev: IB device ptr
- */
-void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
-{
- ib_unregister_device(&iwibdev->ibdev);
- wait_event_timeout(iwibdev->iwdev->close_wq,
- !atomic64_read(&iwibdev->iwdev->use_count),
- I40IW_EVENT_TIMEOUT);
- ib_dealloc_device(&iwibdev->ibdev);
-}
-
-/**
- * i40iw_register_rdma_device - register iwarp device to IB
- * @iwdev: iwarp device
- */
-int i40iw_register_rdma_device(struct i40iw_device *iwdev)
-{
- int ret;
- struct i40iw_ib_device *iwibdev;
-
- iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
- if (!iwdev->iwibdev)
- return -ENOMEM;
- iwibdev = iwdev->iwibdev;
- rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
- ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
- if (ret)
- goto error;
-
- dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
- ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev);
- if (ret)
- goto error;
-
- return 0;
-error:
- ib_dealloc_device(&iwdev->iwibdev->ibdev);
- return ret;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
deleted file mode 100644
index bab71f3e5637..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_VERBS_H
-#define I40IW_VERBS_H
-
-struct i40iw_ucontext {
- struct ib_ucontext ibucontext;
- struct i40iw_device *iwdev;
- struct list_head cq_reg_mem_list;
- spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
- struct list_head qp_reg_mem_list;
- spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
- int abi_ver;
-};
-
-struct i40iw_pd {
- struct ib_pd ibpd;
- struct i40iw_sc_pd sc_pd;
- atomic_t usecount;
-};
-
-struct i40iw_hmc_pble {
- union {
- u32 idx;
- dma_addr_t addr;
- };
-};
-
-struct i40iw_cq_mr {
- struct i40iw_hmc_pble cq_pbl;
- dma_addr_t shadow;
-};
-
-struct i40iw_qp_mr {
- struct i40iw_hmc_pble sq_pbl;
- struct i40iw_hmc_pble rq_pbl;
- dma_addr_t shadow;
- struct page *sq_page;
-};
-
-struct i40iw_pbl {
- struct list_head list;
- union {
- struct i40iw_qp_mr qp_mr;
- struct i40iw_cq_mr cq_mr;
- };
-
- bool pbl_allocated;
- bool on_list;
- u64 user_base;
- struct i40iw_pble_alloc pble_alloc;
- struct i40iw_mr *iwmr;
-};
-
-#define MAX_SAVE_PAGE_ADDRS 4
-struct i40iw_mr {
- union {
- struct ib_mr ibmr;
- struct ib_mw ibmw;
- };
- struct ib_umem *region;
- u16 type;
- u32 page_cnt;
- u64 page_size;
- u32 npages;
- u32 stag;
- u64 length;
- u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
- struct i40iw_pbl iwpbl;
-};
-
-struct i40iw_cq {
- struct ib_cq ibcq;
- struct i40iw_sc_cq sc_cq;
- u16 cq_head;
- u16 cq_size;
- u16 cq_number;
- bool user_mode;
- u32 polled_completions;
- u32 cq_mem_size;
- struct i40iw_dma_mem kmem;
- spinlock_t lock; /* for poll cq */
- struct i40iw_pbl *iwpbl;
-};
-
-struct disconn_work {
- struct work_struct work;
- struct i40iw_qp *iwqp;
-};
-
-struct iw_cm_id;
-struct ietf_mpa_frame;
-struct i40iw_ud_file;
-
-struct i40iw_qp_kmode {
- struct i40iw_dma_mem dma_mem;
- u64 *wrid_mem;
-};
-
-struct i40iw_qp {
- struct ib_qp ibqp;
- struct i40iw_sc_qp sc_qp;
- struct i40iw_device *iwdev;
- struct i40iw_cq *iwscq;
- struct i40iw_cq *iwrcq;
- struct i40iw_pd *iwpd;
- struct i40iw_qp_host_ctx_info ctx_info;
- struct i40iwarp_offload_info iwarp_info;
- void *allocated_buffer;
- refcount_t refcount;
- struct iw_cm_id *cm_id;
- void *cm_node;
- struct ib_mr *lsmm_mr;
- struct work_struct work;
- enum ib_qp_state ibqp_state;
- u32 iwarp_state;
- u32 qp_mem_size;
- u32 last_aeq;
- atomic_t close_timer_started;
- spinlock_t lock; /* for post work requests */
- struct i40iw_qp_context *iwqp_context;
- void *pbl_vbase;
- dma_addr_t pbl_pbase;
- struct page *page;
- u8 active_conn:1;
- u8 user_mode:1;
- u8 hte_added:1;
- u8 flush_issued:1;
- u8 destroyed:1;
- u8 sig_all:1;
- u8 pau_mode:1;
- u8 rsvd:1;
- u16 term_sq_flush_code;
- u16 term_rq_flush_code;
- u8 hw_iwarp_state;
- u8 hw_tcp_state;
- struct i40iw_qp_kmode kqp;
- struct i40iw_dma_mem host_ctx;
- struct timer_list terminate_timer;
- struct i40iw_pbl iwpbl;
- struct i40iw_dma_mem q2_ctx_mem;
- struct i40iw_dma_mem ietf_mem;
- struct completion sq_drained;
- struct completion rq_drained;
- struct completion free_qp;
-};
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c
deleted file mode 100644
index e33d4810965c..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_vf.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_status.h"
-#include "i40iw_hmc.h"
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include "i40iw_vf.h"
-
-/**
- * i40iw_manage_vf_pble_bp - manage vf pble
- * @cqp: cqp for cqp' sq wqe
- * @info: pble info
- * @scratch: pointer for completion
- * @post_sq: to post and ring
- */
-enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
- struct i40iw_manage_vf_pble_info *info,
- u64 scratch,
- bool post_sq)
-{
- u64 *wqe;
- u64 temp, header, pd_pl_pba = 0;
-
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe)
- return I40IW_ERR_RING_FULL;
-
- temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
- LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
- LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
- set_64bit_val(wqe, 16, temp);
-
- header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
- LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
-
- pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
- set_64bit_val(wqe, 32, pd_pl_pba);
-
- i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);
-
- if (post_sq)
- i40iw_sc_cqp_post_sq(cqp);
- return 0;
-}
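Editor's note: the WQE words above are assembled by OR-ing LS_64(value, FIELD) terms, each of which shifts a value to a named field's bit position and masks it. A standalone sketch of the idiom; the shift/mask values here are invented for illustration and are not the real CQP field definitions:

#include <assert.h>
#include <stdint.h>

#define DEMO_OPCODE_SHIFT 32
#define DEMO_OPCODE_MASK  (0x3fULL << DEMO_OPCODE_SHIFT)
#define DEMO_VALID_SHIFT  63
#define DEMO_VALID_MASK   (1ULL << DEMO_VALID_SHIFT)

/* shift a value into a field, pairing FIELD_SHIFT with FIELD_MASK */
#define DEMO_LS_64(val, field) \
	(((uint64_t)(val) << field##_SHIFT) & field##_MASK)

int main(void)
{
	uint64_t header = DEMO_LS_64(0x2d, DEMO_OPCODE) | /* opcode */
			  DEMO_LS_64(1, DEMO_VALID);      /* valid bit */

	assert(((header >> DEMO_OPCODE_SHIFT) & 0x3f) == 0x2d);
	assert((header >> DEMO_VALID_SHIFT) == 1);
	return 0;
}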
-
-const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
- i40iw_manage_vf_pble_bp
-};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h
deleted file mode 100644
index 4359559ece9c..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_vf.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_VF_H
-#define I40IW_VF_H
-
-struct i40iw_sc_cqp;
-
-struct i40iw_manage_vf_pble_info {
- u32 sd_index;
- u16 first_pd_index;
- u16 pd_entry_cnt;
- u8 inv_pd_ent;
- u64 pd_pl_pba;
-};
-
-struct i40iw_vf_cqp_ops {
- enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
- struct i40iw_manage_vf_pble_info *,
- u64,
- bool);
-};
-
-enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
- struct i40iw_manage_vf_pble_info *info,
- u64 scratch,
- bool post_sq);
-
-extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
-
-#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
deleted file mode 100644
index e34a1522132c..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#include "i40iw_osdep.h"
-#include "i40iw_register.h"
-#include "i40iw_status.h"
-#include "i40iw_hmc.h"
-#include "i40iw_d.h"
-#include "i40iw_type.h"
-#include "i40iw_p.h"
-#include "i40iw_virtchnl.h"
-
-/**
- * vchnl_vf_send_get_ver_req - Request Channel version
- * @dev: IWARP device pointer
- * @vchnl_req: Virtual channel message request pointer
- */
-static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_req *vchnl_req)
-{
- enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
- struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
-
- if (!dev->vchnl_up)
- return ret_code;
-
- memset(vchnl_msg, 0, sizeof(*vchnl_msg));
- vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
- vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
- vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
- vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
- ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
- return ret_code;
-}
-
-/**
- * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
- * @dev: IWARP device pointer
- * @vchnl_req: Virtual channel message request pointer
- */
-static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_req *vchnl_req)
-{
- enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
- struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
-
- if (!dev->vchnl_up)
- return ret_code;
-
- memset(vchnl_msg, 0, sizeof(*vchnl_msg));
- vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
- vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
- vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
- vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
- ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
- return ret_code;
-}
-
-/**
- * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
- * @dev: IWARP device pointer
- * @vchnl_req: Virtual channel message request pointer
- */
-static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_req *vchnl_req)
-{
- enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
- struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
-
- if (!dev->vchnl_up)
- return ret_code;
-
- memset(vchnl_msg, 0, sizeof(*vchnl_msg));
- vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
- vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
- vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
- vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
- ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
- return ret_code;
-}
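Editor's note: the recurring "sizeof(*vchnl_msg) + sizeof(payload) - 1" length arithmetic in these senders comes from the one-byte iw_chnl_buf[1] placeholder at the tail of the packed op buffer: the struct already accounts for one payload byte, so the payload overlaps it by one. A standalone sketch with a stand-in struct:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#pragma pack(push, 1)                /* the real header is packed too */
struct demo_op_buf {
	uint16_t op_code;
	uint16_t buf_len;
	uint8_t  buf[1];             /* placeholder for a variable payload */
};
#pragma pack(pop)

int main(void)
{
	size_t payload = 16;
	size_t total = sizeof(struct demo_op_buf) + payload - 1;

	/* one payload byte is already counted inside the struct */
	assert(total == offsetof(struct demo_op_buf, buf) + payload);
	return 0;
}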
-
-/*
- * vchnl_vf_send_add_hmc_objs_req - Add HMC objects
- * @dev: IWARP device pointer
- * @vchnl_req: Virtual channel message request pointer
- */
-static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_req *vchnl_req,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count)
-{
- enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
- struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
- struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
-
- if (!dev->vchnl_up)
- return ret_code;
-
- add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
- memset(vchnl_msg, 0, sizeof(*vchnl_msg));
- memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
- vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
- vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
- vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
- vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
- add_hmc_obj->obj_type = (u16)rsrc_type;
- add_hmc_obj->start_index = start_index;
- add_hmc_obj->obj_count = rsrc_count;
- ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
- return ret_code;
-}
-
-/**
- * vchnl_vf_send_del_hmc_objs_req - del HMC objects
- * @dev: IWARP device pointer
- * @vchnl_req: Virtual channel message request pointer
- * @rsrc_type: resource type to delete
- * @start_index: starting index for resource
- * @rsrc_count: number of resource type to delete
- */
-static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
- struct i40iw_virtchnl_req *vchnl_req,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count)
-{
- enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
- struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
- struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
-
- if (!dev->vchnl_up)
- return ret_code;
-
- add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
- memset(vchnl_msg, 0, sizeof(*vchnl_msg));
- memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
- vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
- vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
- vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
- vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
- add_hmc_obj->obj_type = (u16)rsrc_type;
- add_hmc_obj->start_index = start_index;
- add_hmc_obj->obj_count = rsrc_count;
- ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
- return ret_code;
-}
-
-/**
- * vchnl_pf_send_get_ver_resp - Send channel version to VF
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @vchnl_msg: Virtual channel message buffer pointer
- */
-static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
- u32 vf_id,
- struct i40iw_virtchnl_op_buf *vchnl_msg)
-{
- enum i40iw_status_code ret_code;
- u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
- struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
-
- memset(resp_buffer, 0, sizeof(*resp_buffer));
- vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
- vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
- vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
- *((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
- ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
-}
-
-/**
- * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @vchnl_msg: Virtual channel message buffer pointer
- * @hmc_fcn: HMC function index pointer
- */
-static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
- u32 vf_id,
- struct i40iw_virtchnl_op_buf *vchnl_msg,
- u16 hmc_fcn)
-{
- enum i40iw_status_code ret_code;
- u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
- struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
-
- memset(resp_buffer, 0, sizeof(*resp_buffer));
- vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
- vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
- vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
- *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
- ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
-}
-
-/**
- * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @vchnl_msg: Virtual channel message buffer pointer
- * @hw_stats: HW Stats struct
- */
-
-static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
- u32 vf_id,
- struct i40iw_virtchnl_op_buf *vchnl_msg,
- struct i40iw_dev_hw_stats *hw_stats)
-{
- enum i40iw_status_code ret_code;
- u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
- struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
-
- memset(resp_buffer, 0, sizeof(*resp_buffer));
- vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
- vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
- vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
- *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;
- ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
-}
-
-/**
- * vchnl_pf_send_error_resp - Send an error response to VF
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @vchnl_msg: Virtual channel message buffer pointer
- * @op_ret_code: I40IW_ERR_* status code
- */
-static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
- struct i40iw_virtchnl_op_buf *vchnl_msg,
- u16 op_ret_code)
-{
- enum i40iw_status_code ret_code;
- u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
- struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
-
- memset(resp_buffer, 0, sizeof(resp_buffer));
- vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
- vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
- vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
- ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: virt channel send failed 0x%x\n", __func__, ret_code);
-}
-
-/**
- * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
- * @dev: IWARP device pointer
- * @callback_param: unused CQP callback parameter
- * @cqe_info: CQE information pointer
- */
-static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
- struct i40iw_ccq_cqe_info *cqe_info)
-{
- struct i40iw_vfdev *vf_dev = callback_param;
- struct i40iw_virt_mem vf_dev_mem;
-
- if (cqe_info->error) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
- cqe_info->maj_err_code, cqe_info->min_err_code);
- dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
- vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
- (u16)I40IW_ERR_CQP_COMPL_ERROR);
- vf_dev_mem.va = vf_dev;
- vf_dev_mem.size = sizeof(*vf_dev);
- i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
- } else {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "CQP Completion Operation Return information = 0x%08x\n",
- cqe_info->op_ret_val);
- vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
- vf_dev->msg_count--;
- vchnl_pf_send_get_hmc_fcn_resp(dev,
- vf_dev->vf_id,
- &vf_dev->vf_msg_buffer.vchnl_msg,
- vf_dev->pmf_index);
- }
-}
-
-/**
- * pf_add_hmc_obj_callback - Callback for Add HMC Object
- * @work_vf_dev: pointer to the VF Device
- */
-static void pf_add_hmc_obj_callback(void *work_vf_dev)
-{
- struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
- struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
- struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
- struct i40iw_hmc_create_obj_info info;
- struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
- enum i40iw_status_code ret_code;
-
- if (!vf_dev->pf_hmc_initialized) {
- ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
- if (ret_code)
- goto add_out;
- vf_dev->pf_hmc_initialized = true;
- }
-
- add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
-
- memset(&info, 0, sizeof(info));
- info.hmc_info = hmc_info;
- info.is_pf = false;
- info.rsrc_type = (u32)add_hmc_obj->obj_type;
- info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
- info.start_idx = add_hmc_obj->start_index;
- info.count = add_hmc_obj->obj_count;
- i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
- "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
- info.count, info.rsrc_type);
- ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
- if (!ret_code)
- vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
-add_out:
- vf_dev->msg_count--;
- vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
-}
-
-/**
- * pf_del_hmc_obj_callback - Callback for delete HMC Object
- * @work_vf_dev: pointer to the VF Device
- */
-static void pf_del_hmc_obj_callback(void *work_vf_dev)
-{
- struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
- struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
- struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
- struct i40iw_hmc_del_obj_info info;
- struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
- enum i40iw_status_code ret_code = I40IW_SUCCESS;
-
- if (!vf_dev->pf_hmc_initialized)
- goto del_out;
-
- del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
-
- memset(&info, 0, sizeof(info));
- info.hmc_info = hmc_info;
- info.is_pf = false;
- info.rsrc_type = (u32)del_hmc_obj->obj_type;
- info.start_idx = del_hmc_obj->start_index;
- info.count = del_hmc_obj->obj_count;
- i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
- "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
- info.count, info.rsrc_type);
- ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
-del_out:
- vf_dev->msg_count--;
- vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
-}
-
-/**
- * i40iw_vf_init_pestat - Initialize stats for VF
- * @dev: pointer to the VF Device
- * @stats: Statistics structure pointer
- * @index: Stats index
- */
-static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
-{
- stats->hw = dev->hw;
- i40iw_hw_stats_init(stats, (u8)index, false);
- spin_lock_init(&stats->lock);
-}
-
-/**
- * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @msg: Virtual channel message buffer pointer
- * @len: Length of the virtual channels message
- */
-enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
- u32 vf_id,
- u8 *msg,
- u16 len)
-{
- struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
- struct i40iw_vfdev *vf_dev = NULL;
- struct i40iw_hmc_fcn_info hmc_fcn_info;
- u16 iw_vf_idx;
- u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
- struct i40iw_virt_mem vf_dev_mem;
- struct i40iw_virtchnl_work_info work_info;
- struct i40iw_vsi_pestat *stats;
- enum i40iw_status_code ret_code;
-
- if (!dev || !msg || !len)
- return I40IW_ERR_PARAM;
-
- if (!dev->vchnl_up)
- return I40IW_ERR_NOT_READY;
- if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
- vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
- return I40IW_SUCCESS;
- }
- for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
- if (!dev->vf_dev[iw_vf_idx]) {
- if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)
- first_avail_iw_vf = iw_vf_idx;
- continue;
- }
- if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
- vf_dev = dev->vf_dev[iw_vf_idx];
- break;
- }
- }
- if (vf_dev) {
- if (!vf_dev->msg_count) {
- vf_dev->msg_count++;
- } else {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "VF%u already has a channel message in progress.\n",
- vf_id);
- return I40IW_SUCCESS;
- }
- }
- switch (vchnl_msg->iw_op_code) {
- case I40IW_VCHNL_OP_GET_HMC_FCN:
- if (!vf_dev &&
- (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
- ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
- (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
- if (!ret_code) {
- vf_dev = vf_dev_mem.va;
- vf_dev->stats_initialized = false;
- vf_dev->pf_dev = dev;
- vf_dev->msg_count = 1;
- vf_dev->vf_id = vf_id;
- vf_dev->iw_vf_idx = first_avail_iw_vf;
- vf_dev->pf_hmc_initialized = false;
- vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "vf_dev %p, hmc_info %p, hmc_obj %p\n",
- vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
- dev->vf_dev[first_avail_iw_vf] = vf_dev;
- iw_vf_idx = first_avail_iw_vf;
- } else {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "VF%u Unable to allocate a VF device structure.\n",
- vf_id);
- vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
- return I40IW_SUCCESS;
- }
- memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
- hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
- hmc_fcn_info.vf_id = vf_id;
- hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
- hmc_fcn_info.cqp_callback_param = vf_dev;
- hmc_fcn_info.free_fcn = false;
- ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "VF%u error CQP HMC Function operation.\n",
- vf_id);
- i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
- vf_dev->stats_initialized = true;
- } else {
- if (vf_dev) {
- vf_dev->msg_count--;
- vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
- } else {
- vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
- (u16)I40IW_ERR_NO_MEMORY);
- }
- }
- break;
- case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
- if (!vf_dev)
- return I40IW_ERR_BAD_PTR;
- work_info.worker_vf_dev = vf_dev;
- work_info.callback_fcn = pf_add_hmc_obj_callback;
- memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
- i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
- break;
- case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
- if (!vf_dev)
- return I40IW_ERR_BAD_PTR;
- work_info.worker_vf_dev = vf_dev;
- work_info.callback_fcn = pf_del_hmc_obj_callback;
- memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
- i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
- break;
- case I40IW_VCHNL_OP_GET_STATS:
- if (!vf_dev)
- return I40IW_ERR_BAD_PTR;
- stats = &vf_dev->pestat;
- i40iw_hw_stats_read_all(stats, &stats->hw_stats);
- vf_dev->msg_count--;
- vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
- break;
- default:
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
- vchnl_msg->iw_op_code);
- vchnl_pf_send_error_resp(dev, vf_id,
- vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
- }
- return I40IW_SUCCESS;
-}
-
-/**
- * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
- * @dev: IWARP device pointer
- * @vf_id: Virtual function ID associated with the message
- * @msg: Virtual channel message buffer pointer
- * @len: Length of the virtual channels message
- */
-enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
- u32 vf_id,
- u8 *msg,
- u16 len)
-{
- struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
- struct i40iw_virtchnl_req *vchnl_req;
-
- vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
- vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
- if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
- if (vchnl_req->parm_len && vchnl_req->parm)
- memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: Got response, data size %u\n", __func__,
- vchnl_req->parm_len);
- } else {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s: error length on response, Got %u, expected %u\n", __func__,
- len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
- }
-
- return I40IW_SUCCESS;
-}
-
-/**
- * i40iw_vchnl_vf_get_ver - Request Channel version
- * @dev: IWARP device pointer
- * @vchnl_ver: Virtual channel message version pointer
- */
-enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
- u32 *vchnl_ver)
-{
- struct i40iw_virtchnl_req vchnl_req;
- enum i40iw_status_code ret_code;
-
- if (!i40iw_vf_clear_to_send(dev))
- return I40IW_ERR_TIMEOUT;
- memset(&vchnl_req, 0, sizeof(vchnl_req));
- vchnl_req.dev = dev;
- vchnl_req.parm = vchnl_ver;
- vchnl_req.parm_len = sizeof(*vchnl_ver);
- vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
-
- ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s Send message failed 0x%0x\n", __func__, ret_code);
- return ret_code;
- }
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (ret_code)
- return ret_code;
- else
- return vchnl_req.ret_code;
-}
-
-/**
- * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
- * @dev: IWARP device pointer
- * @hmc_fcn: HMC function index pointer
- */
-enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
- u16 *hmc_fcn)
-{
- struct i40iw_virtchnl_req vchnl_req;
- enum i40iw_status_code ret_code;
-
- if (!i40iw_vf_clear_to_send(dev))
- return I40IW_ERR_TIMEOUT;
- memset(&vchnl_req, 0, sizeof(vchnl_req));
- vchnl_req.dev = dev;
- vchnl_req.parm = hmc_fcn;
- vchnl_req.parm_len = sizeof(*hmc_fcn);
- vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
-
- ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s Send message failed 0x%0x\n", __func__, ret_code);
- return ret_code;
- }
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (ret_code)
- return ret_code;
- else
- return vchnl_req.ret_code;
-}
-
-/**
- * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
- * @dev: IWARP device pointer
- * @rsrc_type: HMC Resource type
- * @start_index: Starting index of the objects to be added
- * @rsrc_count: Number of resources to be added
- */
-enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count)
-{
- struct i40iw_virtchnl_req vchnl_req;
- enum i40iw_status_code ret_code;
-
- if (!i40iw_vf_clear_to_send(dev))
- return I40IW_ERR_TIMEOUT;
- memset(&vchnl_req, 0, sizeof(vchnl_req));
- vchnl_req.dev = dev;
- vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
-
- ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
- &vchnl_req,
- rsrc_type,
- start_index,
- rsrc_count);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s Send message failed 0x%0x\n", __func__, ret_code);
- return ret_code;
- }
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (ret_code)
- return ret_code;
- else
- return vchnl_req.ret_code;
-}
-
-/**
- * i40iw_vchnl_vf_del_hmc_obj - del HMC obj
- * @dev: IWARP device pointer
- * @rsrc_type: HMC Resource type
- * @start_index: Starting index of the object to delete
- * @rsrc_count: Number of resources to be deleted
- */
-enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count)
-{
- struct i40iw_virtchnl_req vchnl_req;
- enum i40iw_status_code ret_code;
-
- if (!i40iw_vf_clear_to_send(dev))
- return I40IW_ERR_TIMEOUT;
- memset(&vchnl_req, 0, sizeof(vchnl_req));
- vchnl_req.dev = dev;
- vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
-
- ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
- &vchnl_req,
- rsrc_type,
- start_index,
- rsrc_count);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s Send message failed 0x%0x\n", __func__, ret_code);
- return ret_code;
- }
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (ret_code)
- return ret_code;
- else
- return vchnl_req.ret_code;
-}
-
-/**
- * i40iw_vchnl_vf_get_pe_stats - Get PE stats
- * @dev: IWARP device pointer
- * @hw_stats: HW stats struct
- */
-enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
- struct i40iw_dev_hw_stats *hw_stats)
-{
- struct i40iw_virtchnl_req vchnl_req;
- enum i40iw_status_code ret_code;
-
- if (!i40iw_vf_clear_to_send(dev))
- return I40IW_ERR_TIMEOUT;
- memset(&vchnl_req, 0, sizeof(vchnl_req));
- vchnl_req.dev = dev;
- vchnl_req.parm = hw_stats;
- vchnl_req.parm_len = sizeof(*hw_stats);
- vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
-
- ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "%s Send message failed 0x%0x\n", __func__, ret_code);
- return ret_code;
- }
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (ret_code)
- return ret_code;
- else
- return vchnl_req.ret_code;
-}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
deleted file mode 100644
index 24886ef08293..000000000000
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*******************************************************************************
-*
-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
-*
-* This software is available to you under a choice of one of two
-* licenses. You may choose to be licensed under the terms of the GNU
-* General Public License (GPL) Version 2, available from the file
-* COPYING in the main directory of this source tree, or the
-* OpenFabrics.org BSD license below:
-*
-* Redistribution and use in source and binary forms, with or
-* without modification, are permitted provided that the following
-* conditions are met:
-*
-* - Redistributions of source code must retain the above
-* copyright notice, this list of conditions and the following
-* disclaimer.
-*
-* - Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials
-* provided with the distribution.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE.
-*
-*******************************************************************************/
-
-#ifndef I40IW_VIRTCHNL_H
-#define I40IW_VIRTCHNL_H
-
-#include "i40iw_hmc.h"
-
-#pragma pack(push, 1)
-
-struct i40iw_virtchnl_op_buf {
- u16 iw_op_code;
- u16 iw_op_ver;
- u16 iw_chnl_buf_len;
- u16 rsvd;
- u64 iw_chnl_op_ctx;
- /* Member alignment MUST be maintained above this location */
- u8 iw_chnl_buf[1];
-};
-
-struct i40iw_virtchnl_resp_buf {
- u64 iw_chnl_op_ctx;
- u16 iw_chnl_buf_len;
- s16 iw_op_ret_code;
- /* Member alignment MUST be maintained above this location */
- u16 rsvd[2];
- u8 iw_chnl_buf[1];
-};
-
-enum i40iw_virtchnl_ops {
- I40IW_VCHNL_OP_GET_VER = 0,
- I40IW_VCHNL_OP_GET_HMC_FCN,
- I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
- I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
- I40IW_VCHNL_OP_GET_STATS
-};
-
-#define I40IW_VCHNL_OP_GET_VER_V0 0
-#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
-#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
-#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
-#define I40IW_VCHNL_OP_GET_STATS_V0 0
-#define I40IW_VCHNL_CHNL_VER_V0 0
-
-struct i40iw_dev_hw_stats;
-
-struct i40iw_virtchnl_hmc_obj_range {
- u16 obj_type;
- u16 rsvd;
- u32 start_index;
- u32 obj_count;
-};
-
-enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
- u32 vf_id,
- u8 *msg,
- u16 len);
-
-enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
- u32 vf_id,
- u8 *msg,
- u16 len);
-
-struct i40iw_virtchnl_req {
- struct i40iw_sc_dev *dev;
- struct i40iw_virtchnl_op_buf *vchnl_msg;
- void *parm;
- u32 vf_id;
- u16 parm_len;
- s16 ret_code;
-};
-
-#pragma pack(pop)
-
-enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
- u32 *vchnl_ver);
-
-enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
- u16 *hmc_fcn);
-
-enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count);
-
-enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
- enum i40iw_hmc_rsrc_type rsrc_type,
- u32 start_index,
- u32 rsrc_count);
-
-enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
- struct i40iw_dev_hw_stats *hw_stats);
-#endif
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
new file mode 100644
index 000000000000..dab88286d549
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_IRDMA
+ tristate "Intel(R) Ethernet Protocol Driver for RDMA"
+ depends on INET
+ depends on IPV6 || !IPV6
+ depends on PCI
+ depends on ICE && I40E
+ select GENERIC_ALLOCATOR
+ select AUXILIARY_BUS
+ help
+ This is the Intel(R) Ethernet Protocol Driver for RDMA, which
+ supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
new file mode 100644
index 000000000000..48c3854235a0
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+# Copyright (c) 2019, Intel Corporation.
+
+#
+# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver
+#
+
+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o
+
+irdma-objs := cm.o \
+ ctrl.o \
+ hmc.o \
+ hw.o \
+ i40iw_hw.o \
+ i40iw_if.o \
+ icrdma_hw.o \
+ main.o \
+ pble.o \
+ puda.o \
+ trace.o \
+ uda.o \
+ uk.o \
+ utils.o \
+ verbs.o \
+ ws.o
+
+CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
new file mode 100644
index 000000000000..6b62299abfbb
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -0,0 +1,4421 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "trace.h"
+
+static void irdma_cm_post_event(struct irdma_cm_event *event);
+static void irdma_disconnect_worker(struct work_struct *work);
+
+/**
+ * irdma_free_sqbuf - put back puda buffer if refcount is 0
+ * @vsi: The VSI structure of the device
+ * @bufp: puda buffer to free
+ */
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
+{
+ struct irdma_puda_buf *buf = bufp;
+ struct irdma_puda_rsrc *ilq = vsi->ilq;
+
+ if (refcount_dec_and_test(&buf->refcount))
+ irdma_puda_ret_bufpool(ilq, buf);
+}
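Editor's note: only the caller that drops the last reference returns the buffer to the pool; refcount_dec_and_test() returns true exactly once, for the final put. A standalone sketch of that put-side contract, with a plain int standing in for the atomic, saturating kernel refcount_t:

#include <assert.h>
#include <stdbool.h>

static int demo_refcount = 2;           /* two holders of the buffer */

/* stand-in for refcount_dec_and_test(): true only on the last put */
static bool demo_dec_and_test(int *r)
{
	return --*r == 0;
}

int main(void)
{
	assert(!demo_dec_and_test(&demo_refcount)); /* first put: no free */
	assert(demo_dec_and_test(&demo_refcount));  /* last put: free here */
	return 0;
}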
+
+/**
+ * irdma_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
+{
+ if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird)
+ conn_ird = cm_node->dev->hw_attrs.max_hw_ird;
+
+ if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord)
+ conn_ord = cm_node->dev->hw_attrs.max_hw_ord;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * irdma_copy_ip_ntohl - copy IP address from network to host
+ * @dst: IP address in host order
+ * @src: IP address in network order (big endian)
+ */
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
+
+/**
+ * irdma_copy_ip_htonl - copy IP address from host to network order
+ * @dst: IP address in network order (big endian)
+ * @src: IP address in host order
+ */
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
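Editor's note: these two helpers treat an IPv6 address as four 32-bit words and byte-swap each word individually. A standalone round-trip check using the same ntohl()/htonl() calls on a sample address:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t net[4], host[4], back[4];
	int i;

	/* 2001:db8::1 as four big-endian 32-bit words (16 bytes) */
	assert(inet_pton(AF_INET6, "2001:db8::1", net) == 1);

	for (i = 0; i < 4; i++)         /* as irdma_copy_ip_ntohl() does */
		host[i] = ntohl(net[i]);
	for (i = 0; i < 4; i++)         /* as irdma_copy_ip_htonl() does */
		back[i] = htonl(host[i]);

	assert(!memcmp(net, back, sizeof(net)));
	return 0;
}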
+
+/**
+ * irdma_get_addr_info - get a copy of a cm_node's ip/tcp info
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void irdma_get_addr_info(struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info)
+{
+ memset(cm_info, 0, sizeof(*cm_info));
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr4(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr6(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ irdma_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * irdma_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void irdma_get_cmevent_info(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * irdma_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int irdma_send_cm_event(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status)
+{
+ struct iw_cm_event event = {};
+
+ event.event = type;
+ event.status = status;
+ trace_irdma_send_cm_event(cm_node, cm_id, type, status,
+ __builtin_return_address(0));
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n",
+ cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
+ status);
+
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ irdma_fill_sockaddr4(cm_node, &event);
+ else
+ irdma_fill_sockaddr6(cm_node, &event);
+ event.provider_data = cm_node;
+ event.private_data = cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ irdma_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ case IW_CM_EVENT_CLOSE:
+ /* Wait if we are in RTS but haven't issued the iwcm event upcall */
+ if (!cm_node->accelerated)
+ wait_for_completion(&cm_node->establish_comp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
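Editor's note: the cm_id->event_handler() call above lands in the consumer (ULP) side of the iWARP CM. A minimal sketch of such a handler, not part of this patch, assuming the iw_cm_handler signature and event types from <rdma/iw_cm.h>; a real ULP would do substantive work per event:

#include <rdma/iw_cm.h>

static int demo_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	switch (event->event) {
	case IW_CM_EVENT_CONNECT_REPLY:   /* active side: connect finished */
		return event->status;
	case IW_CM_EVENT_ESTABLISHED:     /* passive side: QP is in RTS */
		return 0;
	case IW_CM_EVENT_DISCONNECT:      /* peer closed or aborted */
	case IW_CM_EVENT_CLOSE:
		return 0;
	default:
		return 0;
	}
}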
+
+/**
+ * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks
+ * @cm_core: cm's core
+ * @timer_list: a timer list to which cm_node will be selected
+ */
+static void irdma_timer_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *timer_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((cm_node->close_entry || cm_node->send_entry) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->timer_entry, timer_list);
+ }
+}
+
+/**
+ * irdma_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct irdma_cm_event *irdma_create_event(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type)
+{
+ struct irdma_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr,
+ sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr,
+ sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
+ event, type, event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+ trace_irdma_create_event(cm_node, type, __builtin_return_address(0));
+ irdma_cm_post_event(event);
+
+ return event;
+}
+
+/**
+ * irdma_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct irdma_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ return;
+
+ cm_node->send_entry = NULL;
+ irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
+ kfree(send_entry);
+ refcount_dec(&cm_node->refcnt);
+}
+
+/**
+ * irdma_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ irdma_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+/**
+ * irdma_form_ah_cm_frame - get a free packet and build frame with address handle
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+ struct tcphdr *tcph;
+ u16 pktsize;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: AH invalid\n");
+ return NULL;
+ }
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: SQ buf NULL\n");
+ return NULL;
+ }
+
+ sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
+ buf = sqbuf->mem.va;
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, pktsize);
+
+ sqbuf->totallen = pktsize;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+
+ return sqbuf;
+}
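Editor's note: TCP's data-offset field (doff) counts 32-bit words, so the "(sizeof(*tcph) + opts_len + 3) >> 2" above rounds the byte length up to a whole word, and "doff << 2" converts back to padded bytes. A standalone check of that arithmetic (the 6-byte options length is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int tcph_len = 20;              /* sizeof(struct tcphdr) */
	unsigned int opts_len = 6;               /* hypothetical options */
	uint16_t doff = (tcph_len + opts_len + 3) >> 2;

	assert(doff == 7);                       /* 26 bytes -> 7 words */
	assert((unsigned int)(doff << 2) == 28); /* padded header length */
	return 0;
}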
+
+/**
+ * irdma_form_uda_cm_frame - get a free packet and build frame full tcpip packet
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ u16 pktsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ u16 vtag;
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf)
+ return NULL;
+
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ if (cm_node->vlan_id < VLAN_N_VID)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ pktsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ pktsize = sizeof(*ip6h) + sizeof(*tcph);
+ pktsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, eth_hlen + pktsize);
+
+ sqbuf->totallen = pktsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ ethh = (struct ethhdr *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->do_lpb)
+ sqbuf->do_lpb = true;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IP);
+ } else {
+ ethh->h_proto = htons(ETH_P_IP);
+ }
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = cm_node->tos;
+ iph->tot_len = htons(pktsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = IPPROTO_TCP;
+ iph->saddr = htonl(cm_node->loc_addr[0]);
+ iph->daddr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ipv6hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IPV6);
+ } else {
+ ethh->h_proto = htons(ETH_P_IPV6);
+ }
+ ip6h->version = 6;
+ ip6h->priority = cm_node->tos >> 4;
+ ip6h->flow_lbl[0] = cm_node->tos << 4;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(pktsize - sizeof(*ip6h));
+ ip6h->nexthdr = 6;
+ ip6h->hop_limit = 128;
+ irdma_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+ return sqbuf;
+}
+
+/**
+ * irdma_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+int irdma_send_reset(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ trace_irdma_send_reset(cm_node, 0, __builtin_return_address(0));
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->cm_id,
+ cm_node->accelerated, cm_node->state, cm_node->rem_port,
+ cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
+ 1);
+}
+
+/**
+ * irdma_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ trace_irdma_active_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p state=%d\n", cm_node,
+ cm_node->state);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ }
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+}
+
+/**
+ * irdma_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state=%d\n",
+ cm_node, cm_node->state);
+ trace_irdma_passive_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ if (reset)
+ irdma_send_reset(cm_node);
+ else
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void irdma_event_connect_error(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_process_options - process options from TCP header
+ * @cm_node: connection's node
+ * @optionsloc: pointer to start of options
+ * @optionsize: size of all options
+ * @syn_pkt: flag if syn packet
+ */
+static int irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
+ u32 optionsize, u32 syn_pkt)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->base.optionnum) {
+ case OPTION_NUM_EOL:
+ offset = optionsize;
+ break;
+ case OPTION_NUM_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUM_MSS:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MSS Length: %d Offset: %d Size: %d\n",
+ all_options->mss.len, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->mss.len != 4)
+ return -EINVAL;
+ tmp = ntohs(all_options->mss.mss);
+ if ((cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) ||
+ (!cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6))
+ return -EINVAL;
+ if (tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUM_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->windowscale.shiftcount;
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unsupported TCP Option: %x\n",
+ all_options->base.optionnum);
+ break;
+ }
+ offset += all_options->base.len;
+ }
+ if (!got_mss_option && syn_pkt)
+ cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS;
+
+ return 0;
+}
+
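+/*
+ * Wire-format sketch of what irdma_process_options() walks: apart from
+ * EOL and NOP, every option is <kind, len, payload>, so an MSS of 1460
+ * followed by EOL would look like:
+ *
+ *	u8 opts[] = { OPTION_NUM_MSS, 4, 0x05, 0xb4,	// 0x05b4 = 1460
+ *		      OPTION_NUM_EOL };
+ *	irdma_process_options(cm_node, opts, sizeof(opts), 1);
+ */
+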
+/**
+ * irdma_handle_tcp_options - setup TCP context info after parsing TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
+ struct tcphdr *tcph, int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+ int ret;
+
+ if (optionsize) {
+ ret = irdma_process_options(cm_node, optionsloc, optionsize,
+ (u32)tcph->syn);
+ if (ret) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Node %p, Sending Reset\n", cm_node);
+ if (passive)
+ irdma_passive_open_err(cm_node, true);
+ else
+ irdma_active_open_err(cm_node, true);
+ return ret;
+ }
+ }
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window)
+ << cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+
+ return 0;
+}
+
+/**
+ * irdma_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @start_addr: address where to build frame
+ * @mpa_key: to do read0 or write0
+ */
+static void irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * irdma_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY, selects read0 or write0
+ */
+static void irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird, ctrl_ord;
+
+ /* initialize the upper 5 bytes of the frame */
+ irdma_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ if (cm_node->iwdev->iw_ooo) {
+ mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS;
+ cm_node->rcv_mark_en = true;
+ }
+ mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) +
+ IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ird_size;
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ord_size;
+ }
+ ctrl_ird |= IETF_PEER_TO_PEER;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
+}
+
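+/*
+ * Encoding sketch: each RTR control word carries the IRD/ORD value in
+ * its low bits, saturated at IETF_NO_IRD_ORD, with the control flags
+ * OR'd on top; for a request both RDMA0 ops are offered:
+ *
+ *	ctrl_ird = min(ird, IETF_NO_IRD_ORD) | IETF_PEER_TO_PEER;
+ *	ctrl_ord = min(ord, IETF_NO_IRD_ORD) | IETF_RDMA0_WRITE |
+ *		   IETF_RDMA0_READ;
+ */
+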
+/**
+ * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa data buffer
+ * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY, selects read0 or write0
+ */
+static int irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *mpa, u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * irdma_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int irdma_send_mpa_request(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -EINVAL;
+ }
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata, SET_ACK);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_mpa_reject - send an mpa reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
+ const void *pdata, u8 plen)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_mpa_priv_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = pdata;
+ priv_info.size = plen;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr, &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ */
+static int irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
+ u8 *buf)
+{
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buf;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -EOPNOTSUPP;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ /* Remote peer doesn't support RDMA0_READ */
+ return -EOPNOTSUPP;
+
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -EINVAL;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else
+ /* Not supported RDMA0 operation */
+ return -EOPNOTSUPP;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MPAV2 Negotiated ORD: %d, IRD: %d\n",
+ cm_node->ord_size, cm_node->ird_size);
+ trace_irdma_negotiate_mpa_v2(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
+ u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ int mpa_hdr_len, priv_data_len, ret;
+
+ *type = IRDMA_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer small (%x)\n", len);
+ return -EINVAL;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buf;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: private_data too big %d\n", priv_data_len);
+ return -EOVERFLOW;
+ }
+
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: rev %d\n",
+ mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer len(%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
+ return -EOVERFLOW;
+ }
+
+ if (len > IRDMA_MAX_CM_BUF) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer large len = %d\n", len);
+ return -EOVERFLOW;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf);
+ if (ret)
+ return ret;
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = IRDMA_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
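+/*
+ * Layout sketch of the frame irdma_parse_mpa() validates: an MPA v1
+ * header is the 16-byte key, flags, rev and private-data length,
+ * followed by the private data itself; v2 inserts the RTR message
+ * between header and private data:
+ *
+ *	| key[16] | flags | rev | priv_data_len | [rtr_msg] | pdata |
+ */
+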
+/**
+ * irdma_schedule_cm_timer - schedule send or close timer for a cm node
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: if it is send or close
+ * @send_retrans: if rexmits to be done
+ * @close_when_complete: is cm_node to be removed
+ *
+ * note - cm_node needs to be protected before calling this. Encase in:
+ *		refcount_inc(&cm_node->refcnt);
+ *		irdma_schedule_cm_timer(...);
+ *		irdma_rem_ref_cm_node(cm_node);
+ */
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_timer_entry *new_send;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ if (type != IRDMA_TIMER_TYPE_CLOSE)
+ irdma_free_sqbuf(vsi, sqbuf);
+ return -ENOMEM;
+ }
+
+ new_send->retrycount = IRDMA_DEFAULT_RETRYS;
+ new_send->retranscount = IRDMA_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == IRDMA_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: already close entry\n");
+ return -EINVAL;
+ }
+
+ cm_node->close_entry = new_send;
+ } else { /* type == IRDMA_TIMER_TYPE_SEND */
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ refcount_inc(&cm_node->refcnt);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
+
+ refcount_inc(&sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (!send_retrans) {
+ irdma_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
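+/*
+ * Usage sketch for the refcount rule above: a caller that may race
+ * with teardown holds a reference across the call and drops it if
+ * scheduling fails, e.g.:
+ *
+ *	refcount_inc(&cm_node->refcnt);
+ *	if (irdma_schedule_cm_timer(cm_node, sqbuf,
+ *				    IRDMA_TIMER_TYPE_SEND, 0, 1))
+ *		irdma_rem_ref_cm_node(cm_node);
+ */
+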
+/**
+ * irdma_retrans_expired - Could not rexmit the packet
+ * @cm_node: connection's node
+ */
+static void irdma_retrans_expired(struct irdma_cm_node *cm_node)
+{
+ enum irdma_cm_node_state state = cm_node->state;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ switch (state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ irdma_send_reset(cm_node);
+ break;
+ default:
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag to remove the cm_node
+ */
+static void irdma_handle_close_entry(struct irdma_cm_node *cm_node,
+ u32 rem_node)
+{
+ struct irdma_timer_entry *close_entry = cm_node->close_entry;
+ struct irdma_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
+ /* close entries overload ->sqbuf to carry the qp pointer */
+ iwqp = (struct irdma_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * irdma_cm_timer_tick - system's timer expired callback
+ * @t: Pointer to timer_list
+ */
+static void irdma_cm_timer_tick(struct timer_list *t)
+{
+ unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME;
+ struct irdma_cm_node *cm_node;
+ struct irdma_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_sc_vsi *vsi;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ unsigned long flags;
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+
+ rcu_read_lock();
+ irdma_timer_list_prep(cm_core, &timer_list);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ if (nexttimeout > send_entry->timetosend ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
+ cm_node->state == IRDMA_CM_STATE_CLOSED) {
+ irdma_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ irdma_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
+ irdma_retrans_expired(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ vsi = &cm_node->iwdev->vsi;
+ if (!cm_node->ack_rcvd) {
+ refcount_inc(&send_entry->sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
+ timetosend = (IRDMA_RETRY_TIMEOUT <<
+ (IRDMA_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, IRDMA_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ irdma_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
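+/*
+ * Backoff sketch: every unacked retransmit doubles the wait, so with
+ * n = IRDMA_DEFAULT_RETRANS - send_entry->retranscount the next
+ * timeout computed above is
+ *
+ *	timeout(n) = min(IRDMA_RETRY_TIMEOUT << n, IRDMA_MAX_TIMEOUT)
+ */
+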
+/**
+ * irdma_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuf[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct irdma_kmem_info opts;
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuf;
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->mss.optionnum = OPTION_NUM_MSS;
+ options->mss.len = sizeof(struct option_mss);
+ options->mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE;
+ options->windowscale.len = sizeof(struct option_windowscale);
+ options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->eol = OPTION_NUM_EOL;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+void irdma_send_ack(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK);
+ if (sqbuf)
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+}
+
+/**
+ * irdma_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int irdma_send_fin(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @dst_port: listener tcp port num
+ * @vlan_id: virtual LAN ID
+ * @listener_state: state to match with listen node's
+ */
+static struct irdma_cm_listener *
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+ u16 vlan_id, enum irdma_cm_listener_state listener_state)
+{
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk the listener list and return a match on addr/port/vlan */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ /* compare node pair, return node handle if a match */
+ if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
+ !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
+ listen_port == dst_port &&
+ vlan_id == listen_node->vlan_id &&
+ (listener_state & listen_node->listener_state)) {
+ refcount_inc(&listen_node->refcnt);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock,
+ flags);
+ trace_irdma_find_listener(listen_node);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ return NULL;
+}
+
+/**
+ * irdma_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static enum irdma_status_code
+irdma_del_multiple_qhash(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct irdma_cm_listener *child_listen_node;
+ enum irdma_status_code ret = IRDMA_ERR_CFG;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe (pos, tpos,
+ &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (child_listen_node->ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ trace_irdma_del_multiple_qhash(child_listen_node);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ if (child_listen_node->qhash_set) {
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = 0;
+ }
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Child listen node freed = %p\n",
+ child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the net_device of the IPv6 address and also sets the
+ * vlan id and mac for that address.
+ */
+struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct net_device *ip_dev = NULL;
+ struct in6_addr laddr6;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return NULL;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */
+ if (mac)
+ eth_zero_addr(mac);
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->dev_addr && mac)
+ ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ip_dev;
+}
+
+/**
+ * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+u16 irdma_get_vlan_ipv4(u32 *addr)
+{
+ struct net_device *netdev;
+ u16 vlan_id = 0xFFFF;
+
+ netdev = ip_dev_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+
+ return vlan_id;
+}
+
+/**
+ * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum irdma_status_code
+irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ enum irdma_status_code ret = 0;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: idev == NULL\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ ret = IRDMA_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ irdma_copy_ip_ntohl(child_listen_node->loc_addr,
+ ifp->addr.in6_u.u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ continue;
+ }
+
+ trace_irdma_add_mqh_6(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ }
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum irdma_status_code
+irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct in_device *idev;
+ struct irdma_cm_listener *child_listen_node;
+ enum irdma_status_code ret = 0;
+ unsigned long flags;
+ const struct in_ifaddr *ifa;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = in_dev_get(ip_dev);
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ in_dev_put(idev);
+ ret = IRDMA_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ child_listen_node->loc_addr[0] =
+ ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
+ continue;
+ }
+
+ trace_irdma_add_mqh_4(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
+ flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ }
+ in_dev_put(idev);
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh - Adds multiple qhashes
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_listen_node: The parent listen node
+ */
+static enum irdma_status_code
+irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_listen_node)
+{
+ if (cm_info->ipv4)
+ return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
+ else
+ return irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
+}
+
+/**
+ * irdma_reset_list_prep - add connection nodes slated for reset to list
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @reset_list: list to which matching cm_nodes will be added
+ */
+static void irdma_reset_list_prep(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ struct list_head *reset_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if (cm_node->listener == listener &&
+ !cm_node->accelerated &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->reset_entry, reset_list);
+ }
+}
+
+/**
+ * irdma_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int err;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct irdma_cm_node *cm_node;
+ struct list_head reset_list;
+ struct irdma_cm_info nfo;
+ enum irdma_cm_node_state old_state;
+ unsigned long flags;
+
+ trace_irdma_dec_refcnt_listen(listener, __builtin_return_address(0));
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ rcu_read_lock();
+ irdma_reset_list_prep(cm_core, listener, &reset_list);
+ rcu_read_unlock();
+ }
+
+ list_for_each_safe (list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct irdma_cm_node,
+ reset_entry);
+ if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
+ irdma_rem_ref_cm_node(cm_node);
+ continue;
+ }
+
+ irdma_cleanup_retrans_entry(cm_node);
+ err = irdma_send_reset(cm_node);
+ if (err) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ }
+
+ if (refcount_dec_and_test(&listener->refcnt)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (apbvt_del)
+ irdma_del_apbvt(listener->iwdev,
+ listener->apbvt_entry);
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
+ nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ irdma_del_multiple_qhash(listener->iwdev, &nfo,
+ listener);
+ } else {
+ if (listener->qhash_set)
+ irdma_manage_qhash(listener->iwdev,
+ &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ }
+
+ cm_core->stats_listen_destroyed++;
+ cm_core->stats_listen_nodes_destroyed++;
+ ibdev_dbg(&listener->iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
+ listener->loc_port, listener->loc_addr, listener,
+ listener->cm_id, listener->qhash_set,
+ listener->vlan_id, apbvt_del);
+ kfree(listener);
+ listener = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int irdma_cm_del_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL;
+
+ return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * irdma_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip,
+ u32 dst_ip, int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ if (IS_ERR(rt)) {
+ ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+ if (!neigh)
+ goto exit;
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, &dst_ip, true, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ neigh_release(neigh);
+exit:
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+/**
+ * irdma_get_dst_ipv6 - get destination cache entry via ipv6 lookup
+ * @src_addr: local ipv6 sock address
+ * @dst_addr: destination ipv6 sock address
+ */
+static struct dst_entry *irdma_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6 = {};
+
+ fl6.daddr = dst_addr->sin6_addr;
+ fl6.saddr = src_addr->sin6_addr;
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ }
+
+ return dst;
+}
+
+/**
+ * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ip address
+ * @dest: remote ip address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int irdma_addr_resolve_neigh_ipv6(struct irdma_device *iwdev, u32 *src,
+ u32 *dest, int arpindex)
+{
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct dst_entry *dst;
+ struct sockaddr_in6 dst_addr = {};
+ struct sockaddr_in6 src_addr = {};
+
+ dst_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+ src_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+ dst = irdma_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ dst_release(dst);
+ }
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
+ if (!neigh)
+ goto exit;
+
+ ibdev_dbg(&iwdev->ibdev, "CM: dst_neigh_lookup MAC=%pM\n",
+ neigh->ha);
+
+ trace_irdma_addr_resolve(iwdev, neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, dest, false, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ neigh_release(neigh);
+exit:
+ dst_release(dst);
+
+ return rc;
+}
+
+/**
+ * irdma_find_node - find a cm node that matches the reference cm node
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @vlan_id: local VLAN ID
+ */
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id)
+{
+ struct irdma_cm_node *cm_node;
+ u32 key = (rem_port << 16) | loc_port;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key) {
+ if (cm_node->vlan_id == vlan_id &&
+ cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
+ !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
+ if (!refcount_inc_not_zero(&cm_node->refcnt))
+ goto exit;
+ rcu_read_unlock();
+ trace_irdma_find_node(cm_node, 0, NULL);
+ return cm_node;
+ }
+ }
+
+exit:
+ rcu_read_unlock();
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * irdma_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void irdma_add_hte_node(struct irdma_cm_core *cm_core,
+ struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 key = (cm_node->rem_port << 16) | cm_node->loc_port;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ hash_add_rcu(cm_core->cm_hash_tbl, &cm_node->list, key);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
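+/*
+ * Key sketch: irdma_find_node() and irdma_add_hte_node() must derive
+ * the same hash key from a connection's port pair, remote port in the
+ * upper and local port in the lower 16 bits:
+ *
+ *	u32 key = ((u32)rem_port << 16) | loc_port;
+ */
+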
+/**
+ * irdma_ipv4_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * irdma_ipv6_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ irdma_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+
+ return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
+}
+
+/**
+ * irdma_cm_create_ah - create a cm address handle
+ * @cm_node: The connection manager node to create AH for
+ * @wait: Provides option to wait for ah creation or not
+ */
+static int irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
+{
+ struct irdma_ah_info ah_info = {};
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ ether_addr_copy(ah_info.mac_addr, iwdev->netdev->dev_addr);
+
+ ah_info.hop_ttl = 0x40;
+ ah_info.tc_tos = cm_node->tos;
+ ah_info.vsi = &iwdev->vsi;
+
+ if (cm_node->ipv4) {
+ ah_info.ipv4_valid = true;
+ ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
+ ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
+ ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
+ ah_info.dest_ip_addr[0]);
+ } else {
+ memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
+ sizeof(ah_info.dest_ip_addr));
+ memcpy(ah_info.src_ip_addr, cm_node->loc_addr,
+ sizeof(ah_info.src_ip_addr));
+ ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr,
+ ah_info.dest_ip_addr);
+ }
+
+ ah_info.vlan_tag = cm_node->vlan_id;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ah_info.insert_vlan_tag = 1;
+ ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+
+ ah_info.dst_arpindex =
+ irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr,
+ ah_info.ipv4_valid, NULL, IRDMA_ARP_RESOLVE);
+
+ if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait,
+ IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node,
+ &cm_node->ah))
+ return -ENOMEM;
+
+ trace_irdma_create_ah(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_cm_free_ah - free a cm address handle
+ * @cm_node: The connection manager node whose AH is freed
+ */
+static void irdma_cm_free_ah(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ trace_irdma_cm_free_ah(cm_node);
+ irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
+ cm_node->ah = NULL;
+}
+
+/**
+ * irdma_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct irdma_cm_node *
+irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *listener)
+{
+ struct irdma_cm_node *cm_node;
+ int oldarpindex;
+ int arpindex;
+ struct net_device *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ ibdev_warn(&iwdev->ibdev,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ ibdev_dbg(&iwdev->ibdev,
+ "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
+ cm_node->user_pri);
+ trace_irdma_listener_tos(iwdev, cm_node->tos,
+ cm_node->user_pri);
+ }
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->rf->sc_dev;
+
+ cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird;
+ cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+ spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
+
+ init_completion(&cm_node->establish_comp);
+ refcount_set(&cm_node->refcnt, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale;
+ cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale;
+ if (cm_node->ipv4) {
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
+ htonl(cm_node->rem_addr[0]),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4;
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ __be32 loc[4] = {
+ htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
+ htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
+ };
+ __be32 rem[4] = {
+ htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
+ htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
+ };
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6;
+ }
+
+ if ((cm_node->ipv4 &&
+ irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
+ cm_node->do_lpb = true;
+ arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = irdma_addr_resolve_neigh(iwdev,
+ cm_info->loc_addr[0],
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ arpindex = irdma_addr_resolve_neigh_ipv6(iwdev,
+ cm_info->loc_addr,
+ cm_info->rem_addr,
+ oldarpindex);
+ else
+ arpindex = -EINVAL;
+ }
+
+ if (arpindex < 0)
+ goto err;
+
+ ether_addr_copy(cm_node->rem_mac,
+ iwdev->rf->arp_table[arpindex].mac_addr);
+ irdma_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+
+err:
+ kfree(cm_node);
+
+ return NULL;
+}
+
+static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+{
+ struct irdma_cm_node *cm_node =
+ container_of(rcu_head, struct irdma_cm_node, rcu_head);
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_qp *iwqp;
+ struct irdma_cm_info nfo;
+
+ /* if the node is destroyed before the connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ irdma_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (cm_node->apbvt_set) {
+ irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry);
+ cm_node->apbvt_set = 0;
+ }
+ irdma_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ cm_node->cm_id = NULL;
+ iwqp->cm_id = NULL;
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_core->cm_free_ah(cm_node);
+ kfree(cm_node);
+}
+
+/**
+ * irdma_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ unsigned long flags;
+
+ trace_irdma_rem_ref_cm_node(cm_node, 0, __builtin_return_address(0));
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ if (!refcount_dec_and_test(&cm_node->refcnt)) {
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return;
+ }
+ if (cm_node->iwqp) {
+ cm_node->iwqp->cm_node = NULL;
+ cm_node->iwqp->cm_id = NULL;
+ }
+ hash_del_rcu(&cm_node->list);
+ cm_node->cm_core->stats_nodes_destroyed++;
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* wait for all list walkers to exit their grace period */
+ call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+}
+
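+/*
+ * Teardown-ordering sketch: the node is unlinked under ht_lock and
+ * freed only after an RCU grace period, so a lockless reader stays
+ * safe with the usual pattern (match() is a placeholder):
+ *
+ *	rcu_read_lock();
+ *	hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key)
+ *		if (match(cm_node) &&
+ *		    refcount_inc_not_zero(&cm_node->refcnt))
+ *			break;
+ *	rcu_read_unlock();
+ */
+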
+/**
+ * irdma_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSING;
+ irdma_send_ack(cm_node);
+ /*
+ * Wait for ACK as this is simultaneous close.
+ * After we receive ACK, do not send anything.
+ * Just remove the node.
+ */
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
+ irdma_send_ack(cm_node);
+ irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ break;
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad state node state = %d\n", cm_node->state);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->state,
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr);
+
+ irdma_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ /* Drop down to MPA_V1 */
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ if (irdma_send_syn(cm_node, 0))
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ irdma_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ atomic_inc(&cm_node->passive_state);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_passive_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rcv_mpa - Process a recv'd mpa buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ int err;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+ enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (err) {
+ if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
+ irdma_active_open_err(cm_node, true);
+ else
+ irdma_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_ESTABLISHED:
+ if (res_type == IRDMA_MPA_REQUEST_REJECT)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: state for reject\n");
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
+ type = IRDMA_CM_EVENT_MPA_REQ;
+ irdma_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ IRDMA_PASSIVE_STATE_INDICATED);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (res_type == IRDMA_MPA_REQUEST_REJECT) {
+ type = IRDMA_CM_EVENT_MPA_REJECT;
+ cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = IRDMA_CM_EVENT_CONNECTED;
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ }
+ irdma_send_ack(cm_node);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: wrong cm_node state =%d\n", cm_node->state);
+ break;
+ }
+ irdma_create_event(cm_node, type);
+}
+
+/**
+ * irdma_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+ irdma_active_open_err(cm_node, true);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_check_seq - check if sequence numbers are OK
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+ int err = 0;
+
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
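+ /* the peer must ACK exactly our current send sequence, and the
+ * segment must fall within the advertised receive window
+ */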
+ if (ack_seq != loc_seq_num ||
+ !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: seq number err\n");
+
+ return err;
+}
+
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_info nfo;
+
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ cm_node, false);
+ cm_node->qhash_set = true;
+}
+
+/**
+ * irdma_handle_syn_pkt - handle a SYN packet (passive side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ irdma_active_open_err(cm_node, 1);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ irdma_passive_open_err(cm_node, false);
+ break;
+ }
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ err = cm_node->cm_core->cm_create_ah(cm_node, false);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (irdma_check_syn(cm_node, tcph)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ irdma_send_ack(cm_node); /* ACK for the syn_ack */
+ err = irdma_send_mpa_request(cm_node);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p irdma_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_passive_open_err(cm_node, true);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ if (irdma_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ return ret;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
+ }
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
+ break;
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ irdma_cleanup_retrans_entry(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_process_pkt - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_process_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int err;
+
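+ /* classify by TCP flags: RST first, then SYN vs SYN+ACK, then
+ * bare ACK; FIN is tracked separately since it may ride on an ACK
+ */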
+ if (tcph->rst) {
+ pkt_type = IRDMA_PKT_TYPE_RST;
+ } else if (tcph->syn) {
+ pkt_type = IRDMA_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = IRDMA_PKT_TYPE_SYNACK;
+ } else if (tcph->ack) {
+ pkt_type = IRDMA_PKT_TYPE_ACK;
+ }
+ if (tcph->fin)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case IRDMA_PKT_TYPE_SYN:
+ irdma_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_SYNACK:
+ irdma_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_ACK:
+ err = irdma_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !err)
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ case IRDMA_PKT_TYPE_RST:
+ irdma_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * irdma_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct irdma_cm_listener *
+irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info)
+{
+ struct irdma_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener = irdma_find_listener(cm_core, cm_info->loc_addr,
+ cm_info->loc_port, cm_info->vlan_id,
+ IRDMA_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ refcount_dec(&listener->refcnt);
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node: a "half" connection node used only
+ * to match against incoming traffic
+ */
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr,
+ sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ refcount_set(&listener->refcnt, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * irdma_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @conn_param: connection parameters
+ * @cm_info: quad info for connection
+ * @caller_cm_node: pointer to cm_node structure to return
+ */
+static int irdma_create_cm_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct iw_cm_conn_param *conn_param,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_node **caller_cm_node)
+{
+ struct irdma_cm_node *cm_node;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
+ /* create a CM connection node */
+ cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return -ENOMEM;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+
+ irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+ *caller_cm_node = cm_node;
+
+ return 0;
+}
+
+/**
+ * irdma_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
+ u8 plen)
+{
+ int ret;
+ int passive_state;
+
+ if (cm_node->tcp_cntxt.client)
+ return 0;
+
+ irdma_cleanup_retrans_entry(cm_node);
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ ret = irdma_send_mpa_reject(cm_node, pdata, plen);
+ if (!ret)
+ return 0;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ if (irdma_send_reset(cm_node))
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+
+ return ret;
+}
+
+/**
+ * irdma_cm_close - close of cm connection
+ * @cm_node: connection's node
+ */
+static int irdma_cm_close(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_ACCEPTING:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ case IRDMA_CM_STATE_CLOSING:
+ return -EINVAL;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_INITED:
+ case IRDMA_CM_STATE_CLOSED:
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: CM send_entry in OFFLOADED state\n");
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_receive_ilq - recv an Ethernet packet and process it
+ * through CM
+ * @vsi: VSI structure of dev
+ * @rbuf: receive buffer
+ */
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+{
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_listener *listener;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct irdma_cm_info cm_info = {};
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct vlan_ethhdr *ethh;
+ u16 vtag;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct iphdr *)rbuf->iph;
+ print_hex_dump_debug("ILQ: RECEIVE ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, rbuf->mem.va, rbuf->totallen, false);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rbuf->vlan_valid) {
+ vtag = rbuf->vlan_id;
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ } else {
+ ethh = rbuf->mem.va;
+
+ if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ vtag = ntohs(ethh->h_vlan_TCI);
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: vlan_id=%d\n", cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->daddr);
+ cm_info.rem_addr[0] = ntohl(iph->saddr);
+ cm_info.ipv4 = true;
+ cm_info.tos = iph->tos;
+ } else {
+ ip6h = (struct ipv6hdr *)rbuf->iph;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
+ }
+ cm_info.loc_port = ntohs(tcph->dest);
+ cm_info.rem_port = ntohs(tcph->source);
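+ /* look for an existing connection matching this packet's
+ * TCP/IP quad and VLAN
+ */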
+ cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
+ cm_info.loc_port, cm_info.loc_addr, cm_info.vlan_id);
+
+ if (!cm_node) {
+ /* The only packet type accepted here is a bare SYN
+ * for a passive open
+ */
+ if (!tcph->syn || tcph->ack)
+ return;
+
+ listener = irdma_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ IRDMA_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: no listener found\n");
+ return;
+ }
+
+ cm_info.cm_id = listener->cm_id;
+ cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
+ listener);
+ if (!cm_node) {
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: allocate node failed\n");
+ refcount_dec(&listener->refcnt);
+ return;
+ }
+
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = IRDMA_CM_STATE_LISTENING;
+ } else {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ refcount_inc(&cm_node->refcnt);
+ } else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ irdma_process_pkt(cm_node, rbuf);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+static int irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
+{
+ if (!active)
+ irdma_add_conn_est_qh(cm_node);
+ return 0;
+}
+
+static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
+{
+}
+
+/**
+ * irdma_setup_cm_core - setup top level instance of a cm core
+ * @iwdev: iwarp device structure
+ * @rdma_ver: HW version
+ */
+enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
+ u8 rdma_ver)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->rf->sc_dev;
+
+ /* Handles CM event work items sent to the iWARP core */
+ cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
+ if (!cm_core->event_wq)
+ return IRDMA_ERR_NO_MEMORY;
+
+ INIT_LIST_HEAD(&cm_core->listen_list);
+
+ timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
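+ /* Gen1 HW sends CM frames in UDA format and does not use AH
+ * objects, so its create/free AH hooks reduce to quad hash
+ * management and a no-op.
+ */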
+ switch (rdma_ver) {
+ case IRDMA_GEN_1:
+ cm_core->form_cm_frame = irdma_form_uda_cm_frame;
+ cm_core->cm_create_ah = irdma_add_qh;
+ cm_core->cm_free_ah = irdma_cm_free_ah_nop;
+ break;
+ case IRDMA_GEN_2:
+ default:
+ cm_core->form_cm_frame = irdma_form_ah_cm_frame;
+ cm_core->cm_create_ah = irdma_cm_create_ah;
+ cm_core->cm_free_ah = irdma_cm_free_ah;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
+{
+ unsigned long flags;
+
+ if (!cm_core)
+ return;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (timer_pending(&cm_core->tcp_timer))
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ destroy_workqueue(cm_core->event_wq);
+ cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+}
+
+/**
+ * irdma_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
+ struct irdma_tcp_offload_info *tcp_info,
+ struct irdma_qp *iwqp)
+{
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = IRDMA_DEFAULT_TTL;
+ tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR;
+ tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH;
+ tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num;
+
+ tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss;
+ tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd;
+ tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd
+ << cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss;
+ tcp_info->tos = cm_node->tos;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cm_node->vlan_id;
+ tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+ if (cm_node->ipv4) {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+
+ tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0];
+ tcp_info->local_ipaddr[3] = cm_node->loc_addr[0];
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[3],
+ true, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+ memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr,
+ sizeof(tcp_info->dest_ip_addr));
+ memcpy(tcp_info->local_ipaddr, cm_node->loc_addr,
+ sizeof(tcp_info->local_ipaddr));
+
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[0],
+ false, NULL,
+ IRDMA_ARP_RESOLVE);
+ }
+}
+
+/**
+ * irdma_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associated qp for the connection
+ * @cm_node: connection's node
+ */
+static void irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
+ struct irdma_cm_node *cm_node)
+{
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_qp_host_ctx_info *ctx_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &iwqp->tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = cm_node->ird_size;
+ iwarp_info->rd_en = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->user_pri = cm_node->user_pri;
+
+ irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp);
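+ /* the send marker offset accounts for the LSMM that is sent
+ * before any user data
+ */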
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) +
+ cm_node->lsmm_size;
+ }
+
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
+
+ if (cm_node->rcv_mark_en) {
+ iwarp_info->rcv_mark_en = true;
+ iwarp_info->align_hdrs = true;
+ }
+
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * irdma_cm_disconn - when a connection is being closed
+ * @iwqp: associated qp for the connection
+ */
+void irdma_cm_disconn(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct disconn_work *work;
+ unsigned long flags;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: qp_id %d is already freed\n",
+ iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, irdma_disconnect_worker);
+ queue_work(iwdev->cleanup_wq, &work->work);
+}
+
+/**
+ * irdma_qp_disconnect - free qp and close cm
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_qp_disconnect(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->active_conn = 0;
+ /* close the CM node down if it is still active */
+ ibdev_dbg(&iwdev->ibdev, "CM: Call close API\n");
+ irdma_cm_close(iwqp->cm_node);
+}
+
+/**
+ * irdma_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associated qp for the connection
+ */
+static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct irdma_device *iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ unsigned long flags;
+ int err;
+
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ struct ib_qp_attr attr;
+
+ if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ return;
+ }
+
+ cm_id = iwqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
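+ /* Decide which teardown actions apply: flushing the QP work
+ * queues, issuing a disconnect event, and/or issuing a close
+ * event to the IW CM.
+ */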
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ irdma_terminate_del_timer(qp);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ qp->term_flags = 0;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+
+ if (qp->term_flags)
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ }
+
+ if (!cm_id || !cm_id->event_handler)
+ return;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!iwqp->cm_node) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ return;
+ }
+ refcount_inc(&iwqp->cm_node->refcnt);
+
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+
+ if (issue_disconn) {
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: disconnect event failed: - cm_id = %p\n",
+ cm_id);
+ }
+ if (issue_close) {
+ cm_id->provider_data = iwqp;
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_CLOSE, 0);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: close event failed: - cm_id = %p\n",
+ cm_id);
+ irdma_qp_disconnect(iwqp);
+ }
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+}
+
+/**
+ * irdma_disconnect_worker - worker for connection close
+ * @work: pointer to disconn structure
+ */
+static void irdma_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct irdma_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ irdma_cm_disconn_true(iwqp);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_free_lsmm_rsrc - free lsmm memory and deregister
+ * @iwqp: associated qp for the connection
+ */
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = iwqp->iwdev;
+
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ iwdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, NULL);
+ dma_free_coherent(iwdev->rf->sc_dev.hw->device,
+ iwqp->ietf_mem.size, iwqp->ietf_mem.va,
+ iwqp->ietf_mem.pa);
+ iwqp->ietf_mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_node *cm_node;
+ struct ib_qp_attr attr = {};
+ int passive_state;
+ struct ib_mr *ibmr;
+ struct irdma_pd *iwpd;
+ u16 buf_len = 0;
+ struct irdma_kmem_info accept;
+ u64 tagged_offset;
+ int wait_ret;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ cm_node = cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
+ }
+ ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
+ cm_node->vlan_id);
+
+ trace_irdma_accept(cm_node, 0, NULL);
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ ret = -ECONNRESET;
+ goto error;
+ }
+
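+ /* the IETF buffer carries the MPA reply frame followed by the
+ * private data supplied by the application
+ */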
+ buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE;
+ iwqp->ietf_mem.size = ALIGN(buf_len, 1);
+ iwqp->ietf_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->ietf_mem.size,
+ &iwqp->ietf_mem.pa, GFP_KERNEL);
+ if (!iwqp->ietf_mem.va) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy((u8 *)accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iwqp->sc_qp.user_pri = cm_node->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
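+ /* map the SQ so the LSMM WQE can be built in place */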
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size,
+ ibmr->lkey);
+
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = iwqp;
+ iwqp->active_conn = 0;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ cm_node->cm_core->cm_free_ah(cm_node);
+
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ ret = -ECONNRESET;
+ goto error;
+ }
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ cm_node->cm_core->stats_accepts++;
+
+ return 0;
+error:
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+
+ return ret;
+}
+
+/**
+ * irdma_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+
+ cm_node = cm_id->provider_data;
+ cm_node->pdata.size = pdata_len;
+
+ trace_irdma_reject(cm_node, 0, NULL);
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF)
+ return -EINVAL;
+
+ return irdma_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * irdma_connect - registered call for connection to be established
+ * @cm_id: cm information for active connection
+ * @conn_param: Information about the connection
+ */
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = iwqp->iwdev;
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
+ }
+ cm_info.cm_id = cm_id;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+ cm_info.tos = cm_id->tos;
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+
+ if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = cm_info.user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ ibdev_dbg(&iwdev->ibdev, "DCB: TOS:[%d] UP:[%d]\n", cm_id->tos,
+ cm_info.user_pri);
+
+ trace_irdma_dcb_tos(iwdev, cm_id->tos, cm_info.user_pri);
+
+ ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
+ &cm_node);
+ if (ret)
+ return ret;
+ ret = cm_node->cm_core->cm_create_ah(cm_node, true);
+ if (ret)
+ goto err;
+ if (irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
+
+ cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port);
+ if (!cm_node->apbvt_entry) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cm_node->apbvt_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ ret = irdma_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+
+ trace_irdma_connect(cm_node, 0, NULL);
+
+ return 0;
+
+err:
+ if (cm_info.ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+ irdma_rem_ref_cm_node(cm_node);
+ iwdev->cm_core.stats_connect_errs++;
+
+ return ret;
+}
+
+/**
+ * irdma_create_listen - registered call creating listener
+ * @cm_id: cm information for passive connection
+ * @backlog: max pending accept count
+ */
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_listener *cm_listen_node;
+ struct irdma_cm_info cm_info = {};
+ enum irdma_status_code err;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+
+ if (laddr->sin_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) {
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
+ irdma_netdev_vlan_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ }
+
+ if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb)
+ cm_info.vlan_id = 0;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ trace_irdma_create_listen(iwdev, &cm_info);
+
+ cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
+ &cm_info);
+ if (!cm_listen_node) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ cm_listen_node->tos = cm_id->tos;
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
+ if (!cm_listen_node->reused_node) {
+ if (wildcard) {
+ err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node);
+ if (err)
+ goto error;
+ } else {
+ err = irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (err)
+ goto error;
+
+ cm_listen_node->qhash_set = true;
+ }
+
+ cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev,
+ cm_info.loc_port);
+ if (!cm_listen_node->apbvt_entry)
+ goto error;
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
+ cm_listen_node->loc_port, cm_listen_node->loc_addr,
+ cm_listen_node, cm_listen_node->cm_id,
+ cm_listen_node->qhash_set, cm_listen_node->vlan_id);
+
+ return 0;
+
+error:
+ irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false);
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int irdma_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
+ true);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * irdma_teardown_list_prep - add conn nodes slated for tear down to list
+ * @cm_core: cm's core
+ * @teardown_list: list to which matching cm_nodes are added
+ * @ipaddr: pointer to ip address
+ * @nfo: pointer to cm_info structure instance
+ * @disconnect_all: flag indicating disconnect all QPs
+ */
+static void irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *teardown_list,
+ u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
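+ /* take a reference on each matching node so it stays valid
+ * until the caller finishes tearing it down
+ */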
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->teardown_entry, teardown_list);
+ }
+}
+
+/**
+ * irdma_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_connected(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_sc_dev *dev;
+ struct ib_qp_attr attr = {};
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+ int wait_ret = 0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = cm_id->provider_data;
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ if (iwqp->sc_qp.qp_uk.destroy_pending) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+ irdma_sc_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+ cm_node->cm_core->cm_free_ah(cm_node);
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_reset(struct irdma_cm_event *event)
+{
+ struct irdma_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct irdma_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: reset event %p - cm_id = %p\n", event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
+ -ECONNRESET);
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * irdma_cm_event_handler - send event to cm upper layer
+ * @work: pointer to cm event info
+ */
+static void irdma_cm_event_handler(struct work_struct *work)
+{
+ struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
+ struct irdma_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+ trace_irdma_cm_event_handler(cm_node, event->type, NULL);
+
+ switch (event->type) {
+ case IRDMA_CM_EVENT_MPA_REQ:
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST, 0);
+ break;
+ case IRDMA_CM_EVENT_RESET:
+ irdma_cm_event_reset(event);
+ break;
+ case IRDMA_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_cm_event_connected(event);
+ break;
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ break;
+ case IRDMA_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_event_connect_error(event);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad event type = %d\n", event->type);
+ break;
+ }
+
+ irdma_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * irdma_cm_post_event - queue event request for worker thread
+ * @event: cm node's info for up event call
+ */
+static void irdma_cm_post_event(struct irdma_cm_event *event)
+{
+ refcount_inc(&event->cm_node->refcnt);
+ INIT_WORK(&event->event_work, irdma_cm_event_handler);
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
+
+/**
+ * irdma_cm_teardown_connections - teardown QPs
+ * @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: Connection info
+ * @disconnect_all: flag indicating disconnect all QPs
+ *
+ * teardown QPs where source or destination addr matches ip addr
+ */
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_node *cm_node;
+ struct list_head teardown_list;
+ struct ib_qp_attr attr;
+ struct irdma_sc_vsi *vsi = &iwdev->vsi;
+ struct irdma_sc_qp *sc_qp;
+ struct irdma_qp *qp;
+ int i;
+
+ INIT_LIST_HEAD(&teardown_list);
+
+ rcu_read_lock();
+ irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &teardown_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->reset)
+ irdma_cm_disconn(cm_node->iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ if (!iwdev->roce_mode)
+ return;
+
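+ /* for RoCE, walk the per-QOS QP lists and collect matching RC
+ * QPs to move to the error state
+ */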
+ INIT_LIST_HEAD(&teardown_list);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ list_for_each_safe (list_node, list_core_temp,
+ &vsi->qos[i].qplist) {
+ u32 qp_ip[4];
+
+ sc_qp = container_of(list_node, struct irdma_sc_qp,
+ list);
+ if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
+ continue;
+
+ qp = sc_qp->qp_uk.back_qp;
+ if (!disconnect_all) {
+ if (nfo->ipv4)
+ qp_ip[0] = qp->udp_info.local_ipaddr[3];
+ else
+ memcpy(qp_ip,
+ &qp->udp_info.local_ipaddr[0],
+ sizeof(qp_ip));
+ }
+
+ if (disconnect_all ||
+ (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
+ !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
+ spin_lock(&iwdev->rf->qptable_lock);
+ if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
+ irdma_qp_add_ref(&qp->ibqp);
+ list_add(&qp->teardown_entry,
+ &teardown_list);
+ }
+ spin_unlock(&iwdev->rf->qptable_lock);
+ }
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+
+ list_for_each_safe (list_node, list_core_temp, &teardown_list) {
+ qp = container_of(list_node, struct irdma_qp, teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_qp_rem_ref(&qp->ibqp);
+ }
+}
+
+/**
+ * irdma_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP was found
+ * it will allocate and add a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void irdma_qhash_ctrl(struct irdma_device *iwdev,
+ struct irdma_cm_listener *parent_listen_node,
+ struct irdma_cm_info *nfo, u32 *ipaddr, bool ipv4,
+ bool ifup)
+{
+ struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ enum irdma_status_code err;
+ bool node_allocated = false;
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+
+ list_for_each_safe (pos, tpos, child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+ goto set_qhash;
+ }
+
+ /* if not found then add a child listener if interface is going up */
+ if (!ifup)
+ return;
+ child_listen_node = kmemdup(parent_listen_node,
+ sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node)
+ return;
+
+ node_allocated = true;
+ memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+ memcpy(nfo->loc_addr, child_listen_node->loc_addr,
+ sizeof(nfo->loc_addr));
+ nfo->vlan_id = child_listen_node->vlan_id;
+ err = irdma_manage_qhash(iwdev, nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL,
+ false);
+ if (!err) {
+ child_listen_node->qhash_set = ifup;
+ if (node_allocated)
+ list_add(&child_listen_node->child_listen_list,
+ &parent_listen_node->child_listen_list);
+ } else if (node_allocated) {
+ kfree(child_listen_node);
+ }
+}
+
+/**
+ * irdma_if_notify - process an ifup/ifdown on an interface
+ * @iwdev: device pointer
+ * @netdev: network device structure
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ struct irdma_cm_info nfo = {};
+ u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+ nfo.qh_qpid = 1;
+
+ /* Disable or enable qhash for listeners */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ if (vlan_id != listen_node->vlan_id ||
+ (memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) &&
+ memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16)))
+ continue;
+
+ memcpy(nfo.loc_addr, listen_node->loc_addr,
+ sizeof(nfo.loc_addr));
+ nfo.loc_port = listen_node->loc_port;
+ nfo.user_pri = listen_node->user_pri;
+ if (!list_empty(&listen_node->child_listen_list)) {
+ irdma_qhash_ctrl(iwdev, listen_node, &nfo, ipaddr, ipv4,
+ ifup);
+ } else if (memcmp(listen_node->loc_addr, ip_zero,
+ ipv4 ? 4 : 16)) {
+ if (!irdma_manage_qhash(iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN, op,
+ NULL, false))
+ listen_node->qhash_set = ifup;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ /* disconnect any connected qp's on ifdown */
+ if (!ifup)
+ irdma_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
+}
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
new file mode 100644
index 000000000000..d03cd29333ea
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_CM_H
+#define IRDMA_CM_H
+
+#define IRDMA_MPA_REQUEST_ACCEPT 1
+#define IRDMA_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VER 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001ff
+#define IRDMA_MAX_IETF_SIZE 32
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3fff
+
+#define MAX_PORTS 65536
+
+#define IRDMA_PASSIVE_STATE_INDICATED 0
+#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
+#define IRDMA_SEND_RESET_EVENT 2
+
+#define MAX_IRDMA_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+#define TCP_OPTIONS_PADDING 3
+
+#define IRDMA_DEFAULT_RETRYS 64
+#define IRDMA_DEFAULT_RETRANS 8
+#define IRDMA_DEFAULT_TTL 0x40
+#define IRDMA_DEFAULT_RTT_VAR 6
+#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
+#define IRDMA_DEFAULT_REXMIT_THRESH 8
+
+#define IRDMA_RETRY_TIMEOUT HZ
+#define IRDMA_SHORT_TIME 10
+#define IRDMA_LONG_TIME (2 * HZ)
+#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define IRDMA_CM_HASHTABLE_SIZE 1024
+#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
+#define IRDMA_CM_DEFAULT_MTU 1540
+#define IRDMA_CM_DEFAULT_FRAME_CNT 10
+#define IRDMA_CM_THREAD_STACK_SIZE 256
+#define IRDMA_CM_DEFAULT_RCV_WND 64240
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
+#define IRDMA_CM_DEFAULT_FREE_PKTS 10
+#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
+#define IRDMA_CM_DEFAULT_MSS 536
+#define IRDMA_CM_DEFAULT_MPA_VER 2
+#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
+#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
+#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
+#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
+#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_REJECT = 0x20,
+ IETF_MPA_FLAGS_CRC = 0x40,
+ IETF_MPA_FLAGS_MARKERS = 0x80,
+};
+
+enum irdma_timer_type {
+ IRDMA_TIMER_TYPE_SEND,
+ IRDMA_TIMER_TYPE_CLOSE,
+};
+
+enum option_nums {
+ OPTION_NUM_EOL,
+ OPTION_NUM_NONE,
+ OPTION_NUM_MSS,
+ OPTION_NUM_WINDOW_SCALE,
+ OPTION_NUM_SACK_PERM,
+ OPTION_NUM_SACK,
+ OPTION_NUM_WRITE0 = 0xbc,
+};
+
+/* cm node transition states */
+enum irdma_cm_node_state {
+ IRDMA_CM_STATE_UNKNOWN,
+ IRDMA_CM_STATE_INITED,
+ IRDMA_CM_STATE_LISTENING,
+ IRDMA_CM_STATE_SYN_RCVD,
+ IRDMA_CM_STATE_SYN_SENT,
+ IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
+ IRDMA_CM_STATE_ESTABLISHED,
+ IRDMA_CM_STATE_ACCEPTING,
+ IRDMA_CM_STATE_MPAREQ_SENT,
+ IRDMA_CM_STATE_MPAREQ_RCVD,
+ IRDMA_CM_STATE_MPAREJ_RCVD,
+ IRDMA_CM_STATE_OFFLOADED,
+ IRDMA_CM_STATE_FIN_WAIT1,
+ IRDMA_CM_STATE_FIN_WAIT2,
+ IRDMA_CM_STATE_CLOSE_WAIT,
+ IRDMA_CM_STATE_TIME_WAIT,
+ IRDMA_CM_STATE_LAST_ACK,
+ IRDMA_CM_STATE_CLOSING,
+ IRDMA_CM_STATE_LISTENER_DESTROYED,
+ IRDMA_CM_STATE_CLOSED,
+};
+
+enum mpa_frame_ver {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2,
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY,
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2,
+};
+
+enum irdma_tcpip_pkt_type {
+ IRDMA_PKT_TYPE_UNKNOWN,
+ IRDMA_PKT_TYPE_SYN,
+ IRDMA_PKT_TYPE_SYNACK,
+ IRDMA_PKT_TYPE_ACK,
+ IRDMA_PKT_TYPE_FIN,
+ IRDMA_PKT_TYPE_RST,
+};
+
+enum irdma_cm_listener_state {
+ IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
+ IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
+ IRDMA_CM_LISTENER_EITHER_STATE = 3,
+};
+
+/* CM event codes */
+enum irdma_cm_event_type {
+ IRDMA_CM_EVENT_UNKNOWN,
+ IRDMA_CM_EVENT_ESTABLISHED,
+ IRDMA_CM_EVENT_MPA_REQ,
+ IRDMA_CM_EVENT_MPA_CONNECT,
+ IRDMA_CM_EVENT_MPA_ACCEPT,
+ IRDMA_CM_EVENT_MPA_REJECT,
+ IRDMA_CM_EVENT_MPA_ESTABLISHED,
+ IRDMA_CM_EVENT_CONNECTED,
+ IRDMA_CM_EVENT_RESET,
+ IRDMA_CM_EVENT_ABORTED,
+};
+
+struct irdma_bth { /* Base Trasnport Header */
+ u8 opcode;
+ u8 flags;
+ __be16 pkey;
+ __be32 qpn;
+ __be32 apsn;
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[];
+};
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[];
+};
+
+struct option_base {
+ u8 optionnum;
+ u8 len;
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 len;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 len;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char eol;
+ struct option_base base;
+ struct option_mss mss;
+ struct option_windowscale windowscale;
+};
+
+struct irdma_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct irdma_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+/* CM context params */
+struct irdma_cm_tcp_context {
+ u8 client;
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+ u32 loc_id;
+ u32 rem_id;
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+};
+
+struct irdma_apbvt_entry {
+ struct hlist_node hlist;
+ u32 use_cnt;
+ u16 port;
+};
+
+struct irdma_cm_listener {
+ struct list_head list;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_core *cm_core;
+ struct irdma_device *iwdev;
+ struct list_head child_listen_list;
+ struct irdma_apbvt_entry *apbvt_entry;
+ enum irdma_cm_listener_state listener_state;
+ refcount_t refcnt;
+ atomic_t pend_accepts_cnt;
+ u32 loc_addr[4];
+ u32 reused_node;
+ int backlog;
+ u16 loc_port;
+ u16 vlan_id;
+ u8 loc_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool qhash_set:1;
+ bool ipv4:1;
+};
+
+struct irdma_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+struct irdma_mpa_priv_info {
+ const void *addr;
+ u32 size;
+};
+
+struct irdma_cm_node {
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_tcp_context tcp_cntxt;
+ struct irdma_cm_core *cm_core;
+ struct irdma_timer_entry *send_entry;
+ struct irdma_timer_entry *close_entry;
+ struct irdma_cm_listener *listener;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ struct list_head teardown_entry;
+ struct irdma_apbvt_entry *apbvt_entry;
+ struct rcu_head rcu_head;
+ struct irdma_mpa_priv_info pdata;
+ struct irdma_sc_ah *ah;
+ union {
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ };
+ struct irdma_kmem_info mpa_hdr;
+ struct iw_cm_id *cm_id;
+ struct hlist_node list;
+ struct completion establish_comp;
+ spinlock_t retrans_list_lock; /* protect CM node rexmit updates*/
+ atomic_t passive_state;
+ refcount_t refcnt;
+ enum irdma_cm_node_state state;
+ enum send_rdma0 send_rdma0_op;
+ enum mpa_frame_ver mpa_frame_rev;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ int apbvt_set;
+ int accept_pend;
+ u16 vlan_id;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ u16 lsmm_size;
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool ack_rcvd:1;
+ bool qhash_set:1;
+ bool ipv4:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool do_lpb:1;
+ bool accelerated:1;
+};
+
+/* Used by internal CM APIs to pass CM information*/
+struct irdma_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u32 qh_qpid;
+ u16 vlan_id;
+ int backlog;
+ u8 user_pri;
+ u8 tos;
+ bool ipv4;
+};
+
+struct irdma_cm_event {
+ enum irdma_cm_event_type type;
+ struct irdma_cm_info cm_info;
+ struct work_struct event_work;
+ struct irdma_cm_node *cm_node;
+};
+
+struct irdma_cm_core {
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct list_head listen_list;
+ DECLARE_HASHTABLE(cm_hash_tbl, 8);
+ DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
+ struct timer_list tcp_timer;
+ struct workqueue_struct *event_wq;
+ spinlock_t ht_lock; /* protect CM node (active side) list */
+ spinlock_t listen_list_lock; /* protect listener list */
+ spinlock_t apbvt_lock; /*serialize apbvt add/del entries*/
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_lpbs;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+ struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags);
+ int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
+ void (*cm_free_ah)(struct irdma_cm_node *cm_node);
+};
+
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete);
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
+int irdma_destroy_listen(struct iw_cm_id *cm_id);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac);
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all);
+int irdma_cm_start(struct irdma_device *dev);
+int irdma_cm_stop(struct irdma_device *dev);
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ u8 *mac_addr, u32 action);
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup);
+bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
+void irdma_send_ack(struct irdma_cm_node *cm_node);
+void irdma_lpb_nop(struct irdma_sc_qp *qp);
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
+#endif /* IRDMA_CM_H */
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
new file mode 100644
index 000000000000..b1023a7d0bd1
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -0,0 +1,5657 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+
+/**
+ * irdma_get_qp_from_list - get next qp from a list
+ * @head: Listhead of qp's
+ * @qp: current qp
+ */
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp)
+{
+ struct list_head *lastentry;
+ struct list_head *entry = NULL;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = head->next;
+ } else {
+ lastentry = &qp->list;
+ entry = lastentry->next;
+ if (entry == head)
+ return NULL;
+ }
+
+ return container_of(entry, struct irdma_sc_qp, list);
+}
+
+/**
+ * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
+ * @vsi: the VSI struct pointer
+ * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
+ */
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
+{
+ struct irdma_sc_qp *qp = NULL;
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (op == IRDMA_OP_RESUME) {
+ if (!qp->dev->ws_add(vsi, i)) {
+ qp->qs_handle =
+ vsi->qos[qp->user_pri].qs_handle;
+ irdma_cqp_qp_suspend_resume(qp, op);
+ } else {
+ irdma_cqp_qp_suspend_resume(qp, op);
+ irdma_modify_qp_to_err(qp);
+ }
+ } else if (op == IRDMA_OP_SUSPEND) {
+ /* issue cqp suspend command */
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ atomic_inc(&vsi->qp_suspend_reqs);
+ }
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+}
+
+/**
+ * irdma_change_l2params - given the new l2 parameters, change all qp
+ * @vsi: RDMA VSI pointer
+ * @l2params: New parameters from l2
+ */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params)
+{
+ if (l2params->mtu_changed) {
+ vsi->mtu = l2params->mtu;
+ if (vsi->ieq)
+ irdma_reinitialize_ieq(vsi);
+ }
+
+ if (!l2params->tc_changed)
+ return;
+
+ vsi->tc_change_pending = false;
+ irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
+}
+
+/**
+ * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (qp->on_qoslist) {
+ qp->on_qoslist = false;
+ list_del(&qp->list);
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void irdma_qp_add_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "DCB: DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (!qp->on_qoslist) {
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ * @abi_ver: User/Kernel ABI version
+ */
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver)
+{
+ pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
+ pd->dev = dev;
+}
+
+/**
+ * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ set_64bit_val(wqe, 8, info->reach_max);
+ set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
+
+ hdr = info->arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_del_arp_cache_entry - dele arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ hdr = arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->port);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
+ FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started.
+ * For passive connections, when listener is created, it will
+ * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
+ * ip address and tcp port. When SYN is received (passive
+ * connections) or sent (active connections), this routine is
+ * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
+ * and quad is passed in info.
+ *
+ * When iwarp connection is done and its state moves to RTS, the
+ * quad hash entry in the hardware will point to iwarp's qp
+ * number and requires no calls from the driver.
+ */
+static enum irdma_status_code
+irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_qhash_table_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+ struct irdma_sc_vsi *vsi = info->vsi;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
+
+ qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
+ } else {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
+ }
+ qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
+ vsi->qos[info->user_pri].qs_handle);
+ if (info->vlan_valid)
+ qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
+ set_64bit_val(wqe, 16, qw2);
+ if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
+ }
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
+ IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
+ struct irdma_qp_init_info *info)
+{
+ enum irdma_status_code ret_code;
+ u32 pble_obj_cnt;
+ u16 wqe_size;
+
+ if (info->qp_uk_init_info.max_sq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
+ info->qp_uk_init_info.max_rq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
+ qp->ieq_qp = info->vsi->exception_lan_q;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+ ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ qp->virtual_map = info->virtual_map;
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
+ (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+ qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
+ wqe_size = IRDMA_WQE_SIZE_128;
+ else
+ ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
+ qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
+ return IRDMA_ERR_INVALID_QP_ID;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
+ struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
+ if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
+ term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
+ info->cached_var_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
+ info->remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag if to remove hash idx
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_get_encoded_ird_size -
+ * @ird_size: IRD size
+ * The ird from the connection is rounded to a supported HW setting and then encoded
+ * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
+ * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
+ */
+static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128;
+ case 64:
+ case 32:
+ return IRDMA_IRD_HW_SIZE_64;
+ case 16:
+ case 8:
+ return IRDMA_IRD_HW_SIZE_16;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4;
+}
+
+/**
+ * irdma_sc_qp_setctx_roce - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ roce_info = info->roce_info;
+ udp = info->udp_info;
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ set_64bit_val(qp_ctx, 0,
+ FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
+ !(udp->tos & 0x03))
+ udp->tos |= ECN_CODE_PT_VAL;
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ set_64bit_val(qp_ctx, 56,
+ FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
+ set_64bit_val(qp_ctx, 64,
+ FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+ set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+
+ print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_add_local_mac_entry - add mac enry
+ * @cqp: struct for cqp hw
+ * @info:mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
+
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_del_local_mac_entry - cqp wqe to dele local mac
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac adde delete
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count, bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_iwarp_offload_info *iw;
+ struct irdma_tcp_offload_info *tcp;
+ struct irdma_sc_dev *dev;
+ u8 push_mode_en;
+ u32 push_idx;
+ u64 qw0, qw3, qw7 = 0, qw16 = 0;
+ u64 mac = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ dev = qp->dev;
+ if (iw->rcv_mark_en) {
+ qp->pfpdu.marker_len = 4;
+ qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
+ }
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
+ qp->src_mac_addr_idx);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
+ FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
+ if (info->iwarp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
+ FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
+ FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
+ iw->err_rq_idx_valid);
+ qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
+ qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ mac = ether_addr_to_u64(iw->mac_addr);
+
+ set_64bit_val(qp_ctx, 152,
+ mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
+ FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
+ FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
+ FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
+ FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
+ FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
+ FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
+ FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
+ tcp->insert_vlan_tag) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
+ FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
+ FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
+ FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
+
+ if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
+ tcp->tos |= ECN_CODE_PT_VAL;
+
+ qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
+ FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
+ FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ }
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
+ qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
+ FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
+ tcp->ignore_tcp_opt) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
+ tcp->ignore_tcp_uns_opt) |
+ FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
+ FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
+ FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
+ set_64bit_val(qp_ctx, 72,
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
+ FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
+ set_64bit_val(qp_ctx, 104,
+ FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
+ FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
+ FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
+ set_64bit_val(qp_ctx, 120,
+ FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
+ FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
+ qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+ }
+
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 56, qw7);
+ set_64bit_val(qp_ctx, 128, qw16);
+
+ print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
+ qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ enum irdma_page_size page_size;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
+
+ if (info->chunk_size)
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 fbo;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+ enum irdma_page_size page_size;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else if (info->page_size == 0x1000)
+ page_size = IRDMA_PAGE_SIZE_4K;
+ else
+ return IRDMA_ERR_PARAM;
+
+ if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ fbo = info->va & (info->page_size - 1);
+
+ set_64bit_val(wqe, 0,
+ (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
+ info->va : fbo));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ if (!info->chunk_size) {
+ set_64bit_val(wqe, 32, info->reg_addr_pa);
+ set_64bit_val(wqe, 48, 0);
+ } else {
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
+ }
+ set_64bit_val(wqe, 40, info->hmc_fcn_index);
+ set_64bit_val(wqe, 56, 0);
+
+ addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @info: memory window allocation information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 hdr;
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
+ info->mw1_bind_dont_vldt_key) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
+ * @qp: sc qp struct
+ * @info: fast mr info
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code
+irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info, bool post_sq)
+{
+ u64 temp, hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ enum irdma_page_size page_size;
+ struct irdma_post_sq_info sq_info = {};
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ sq_info.wr_id = info->wr_id;
+ sq_info.signaled = info->signaled;
+ sq_info.push_wqe = info->push_wqe;
+
+ wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
+ IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(&qp->qp_uk, wqe_idx);
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
+ info->wr_id, wqe_idx,
+ &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+
+ temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
+ (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
+ temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
+ info->first_pm_pbl_index >> 16);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
+ FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
+ set_64bit_val(wqe, 16,
+ info->total_len |
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
+ FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
+ FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+ if (sq_info.push_wqe) {
+ irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
+ wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_rts_ae - request AE generated after RTS
+ * @qp: sc qp struct
+ */
+static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+
+ wqe = qp_uk->sq_base[1].elem;
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ wqe = qp_uk->sq_base[2].elem;
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+}
+
+/**
+ * irdma_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_sc_send_lsmm_nostag - for privilege qp
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ */
+void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
+ else
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SEND_LSMM_NOSTAG WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+}
+
+/**
+ * irdma_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: Do read0 or write0
+ */
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 16, 0);
+ if (read) {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
+ } else {
+ set_64bit_val(wqe, 8,
+ (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ } else {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8, 0);
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ }
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_iwarp_opcode - extract the rdmap opcode from the packet
+ * @info: aeq info for the packet
+ * @pkt: offending packet
+ */
+static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
+{
+ __be16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
+ mpa = (__be16 *)pkt;
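+ /* the rdmap opcode is the low nibble of the rdma control
+ * byte that follows the 2-byte mpa length and ddp control
+ */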
+ opcode = ntohs(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/**
+ * irdma_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *irdma_locate_mpa(u8 *pkt)
+{
+ /* skip over ethernet header */
+ pkt += IRDMA_MAC_HLEN;
+
+ /* Skip over IP and TCP headers */
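+ /* pkt[0] low nibble is the ip header length and pkt[12] high
+ * nibble the tcp data offset, both counted in 4-byte words
+ */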
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+
+ return pkt;
+}
+
+/**
+ * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
+ struct irdma_terminate_hdr *hdr,
+ enum irdma_flush_opcode opcode,
+ u8 layer_etype, u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
+ * @pkt: ptr to mpa in offending pkt
+ * @hdr: term hdr
+ * @copy_len: offending pkt length to be copied to term hdr
+ * @is_tagged: DDP tagged or untagged
+ */
+static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
+ int *copy_len, u8 *is_tagged)
+{
+ u16 ddp_seg_len;
+
+ ddp_seg_len = ntohs(*(__be16 *)pkt);
+ if (ddp_seg_len) {
+ *copy_len = 2;
+ hdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ *is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ *copy_len += TERM_DDP_LEN_TAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ *copy_len += TERM_DDP_LEN_UNTAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
+ ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
+ *copy_len += TERM_RDMA_LEN;
+ hdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct irdma_terminate_hdr *termhdr;
+
+ termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+ if (info->q2_data_written) {
+ pkt = irdma_locate_mpa(pkt);
+ irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
+ }
+
+ opcode = irdma_iwarp_opcode(info, pkt);
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_STAG);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (info->q2_data_written)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_BOUNDS);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_BOUNDS);
+ break;
+ case IRDMA_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_INVALID_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BAD_QP:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ switch (opcode) {
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_ACCESS);
+ break;
+ case IRDMA_AE_AMP_TO_WRAP:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_TO_WRAP);
+ break;
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_DDP_VER);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MO);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_RDMAP_VER);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ return sizeof(struct irdma_terminate_hdr) + copy_len;
+}
+
+/**
+ * irdma_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
+{
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_FIN_ONLY, 0);
+}
+
+/**
+ * irdma_terminate_connection() - Bad AE and send terminate to remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & IRDMA_TERM_SENT)
+ return;
+
+ termlen = irdma_bld_terminate_hdr(qp, info);
+ irdma_terminate_start_timer(qp);
+ qp->term_flags |= IRDMA_TERM_SENT;
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * irdma_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ __be32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct irdma_terminate_hdr *termhdr;
+
+ mpa = (__be32 *)irdma_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
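+ /* a valid terminate is the last (L=1) untagged (T=0) ddp
+ * segment, ddp version 1, on queue 2 with msn 1 and mo 0,
+ * carried on rdmap version 1
+ */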
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ irdma_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= IRDMA_TERM_RCVD;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ termhdr = (struct irdma_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ irdma_terminate_done(qp, 0);
+ } else {
+ irdma_terminate_start_timer(qp);
+ irdma_terminate_send_fin(qp);
+ }
+}
+
+static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
+ u8 user_pri)
+{
+ return 0;
+}
+
+static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ /* do nothing */
+}
+
+static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ /* do nothing */
+}
+
+/**
+ * irdma_sc_vsi_init - Init the vsi structure
+ * @vsi: pointer to vsi structure to initialize
+ * @info: the info used to initialize the vsi struct
+ */
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info)
+{
+ struct irdma_l2params *l2p;
+ int i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->register_qset = info->register_qset;
+ vsi->unregister_qset = info->unregister_qset;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_q = info->exception_lan_q;
+ vsi->vsi_idx = info->pf_data_vsi_num;
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->fcn_id = info->dev->hmc_fn_id;
+
+ l2p = info->params;
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ vsi->qos[i].traffic_class = info->params->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ mutex_init(&vsi->qos[i].qos_mutex);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+ if (vsi->register_qset) {
+ vsi->dev->ws_add = irdma_ws_add;
+ vsi->dev->ws_remove = irdma_ws_remove;
+ vsi->dev->ws_reset = irdma_ws_reset;
+ } else {
+ vsi->dev->ws_add = irdma_null_ws_add;
+ vsi->dev->ws_remove = irdma_null_ws_remove;
+ vsi->dev->ws_reset = irdma_null_ws_reset;
+ }
+}
+
+/**
+ * irdma_get_fcn_id - Return the function id
+ * @vsi: pointer to the vsi
+ */
+static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u8 fcn_id = IRDMA_INVALID_FCN_ID;
+ u8 start_idx, max_stats, i;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
+ &stats_info))
+ return stats_info.stats_idx;
+ }
+
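+ /* GEN_1 fallback: scan for a free stats index; index 0 is skipped */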
+ start_idx = 1;
+ max_stats = 16;
+ for (i = start_idx; i < max_stats; i++)
+ if (!dev->fcn_id_array[i]) {
+ fcn_id = i;
+ dev->fcn_id_array[i] = true;
+ break;
+ }
+
+ return fcn_id;
+}
+
+/**
+ * irdma_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
+{
+ u8 fcn_id = info->fcn_id;
+ struct irdma_dma_mem *stats_buff_mem;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
+ stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
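+ /* a single DMA buffer holds the current and last gather areas */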
+ stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
+ stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
+ stats_buff_mem->size,
+ &stats_buff_mem->pa,
+ GFP_KERNEL);
+ if (!stats_buff_mem->va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
+ vsi->pestat->gather_info.last_gather_stats_va =
+ (void *)((uintptr_t)stats_buff_mem->va +
+ IRDMA_GATHER_STATS_BUF_SIZE);
+
+ irdma_hw_stats_start_timer(vsi);
+ if (info->alloc_fcn_id)
+ fcn_id = irdma_get_fcn_id(vsi);
+ if (fcn_id == IRDMA_INVALID_FCN_ID)
+ goto stats_error;
+
+ vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
+ vsi->fcn_id = fcn_id;
+ if (info->alloc_fcn_id) {
+ vsi->pestat->gather_info.use_stats_inst = true;
+ vsi->pestat->gather_info.stats_inst_index = fcn_id;
+ }
+
+ return 0;
+
+stats_error:
+ dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
+ stats_buff_mem->va, stats_buff_mem->pa);
+ stats_buff_mem->va = NULL;
+
+ return IRDMA_ERR_CQP_COMPL_ERROR;
+}
+
+/**
+ * irdma_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ u8 fcn_id = vsi->fcn_id;
+ struct irdma_sc_dev *dev = vsi->dev;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ if (vsi->stats_fcn_id_alloc) {
+ stats_info.stats_idx = vsi->fcn_id;
+ irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
+ &stats_info);
+ }
+ } else {
+ if (vsi->stats_fcn_id_alloc &&
+ fcn_id < vsi->dev->hw_attrs.max_stat_inst)
+ vsi->dev->fcn_id_array[fcn_id] = false;
+ }
+
+ if (!vsi->pestat)
+ return;
+ irdma_hw_stats_stop_timer(vsi);
+ dma_free_coherent(vsi->pestat->hw->device,
+ vsi->pestat->gather_info.stats_buff_mem.size,
+ vsi->pestat->gather_info.stats_buff_mem.va,
+ vsi->pestat->gather_info.stats_buff_mem.pa);
+ vsi->pestat->gather_info.stats_buff_mem.va = NULL;
+}
+
+/**
+ * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq) to be encoded
+ * @queue_type: queue type selected for the calculation algorithm
+ */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
+{
+ u8 encoded_size = 0;
+
+ /* cqp sq's hw coded value starts from 1 for size of 4
+ * while it starts from 0 for qp's wqs.
+ */
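+ /* e.g. a 4-entry qp wq encodes to 0 and a 2048-entry cqp sq to 10 */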
+ if (queue_type == IRDMA_QUEUE_TYPE_CQP)
+ encoded_size = 1;
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+
+ return encoded_size;
+}
+
+/**
+ * irdma_sc_gather_stats - collect the statistics
+ * @cqp: struct for cqp hw
+ * @info: gather stats info structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
+ return IRDMA_ERR_BUF_TOO_SHORT;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
+ set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
+ info->stats_inst_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_stats_inst - allocate or free stats instance
+ * @cqp: struct for cqp hw
+ * @info: stats info structure
+ * @alloc: alloc vs. delete flag
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info, bool alloc,
+ u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_set_up_map - set the up map table
+ * @cqp: struct for cqp hw
+ * @info: User priority map info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info,
+ u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+ int i;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ temp |= (u64)info->map[i] << (i * 8);
+
+ set_64bit_val(wqe, 0, temp);
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
+ info->use_cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_ws_node - create/modify/destroy WS node
+ * @cqp: struct for cqp hw
+ * @info: node info structure
+ * @node_op: 0 for add, 1 for modify, 2 for delete
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_flush_wqes - flush qp's wqes
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 temp = 0;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ bool flush_sq = false, flush_rq = false;
+
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+
+ if (!flush_sq && !flush_rq) {
+ ibdev_dbg(to_ibdev(qp->dev),
+ "CQP: Additional flush request ignored for qp %x\n",
+ qp->qp_uk.qp_id);
+ return IRDMA_ERR_FLUSHED_Q;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ if (info->userflushcode) {
+ if (flush_rq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
+ info->rq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
+ info->rq_major_code);
+ if (flush_sq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
+ info->sq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
+ info->sq_major_code);
+ }
+ set_64bit_val(wqe, 16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src) : 0;
+ set_64bit_val(wqe, 8, temp);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
+ * @qp: sc qp
+ * @info: gen ae information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 temp;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src);
+ set_64bit_val(wqe, 8, temp);
+
+ hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_GEN_AE) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->buf_pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ if (info->free_page &&
+ info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
+ return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->qs_handle);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
+ struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
+ struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_ack - acknowledge completion q
+ * @cq: cq struct
+ */
+static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
+{
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+}
+
+/**
+ * irdma_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
+ struct irdma_cq_init_info *info)
+{
+ enum irdma_status_code ret_code;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
+ info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
+ ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->vsi = info->vsi;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
+ u64 scratch,
+ bool check_overflow,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+ enum irdma_status_code ret_code = 0;
+
+ cqp = cq->dev->cqp;
+ if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
+ return IRDMA_ERR_INVALID_CQ_ID;
+
+ if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
+ return IRDMA_ERR_INVALID_CEQ_ID;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ ret_code = irdma_sc_add_cq_ctx(ceq, cq);
+
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+ return IRDMA_ERR_RING_FULL;
+ }
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ hdr = cq->cq_uk.cq_id |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_resize - set resized cq buffer info
+ * @cq: resized cq
+ * @info: resized cq buffer info
+ */
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
+{
+ cq->virtual_map = info->virtual_map;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
+}
+
+/**
+ * irdma_sc_cq_modify - modify a Completion Queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag to post to sq
+ */
+static enum irdma_status_code
+irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
+ u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->cq_resize && info->virtual_map &&
+ info->first_pm_pbl_idx >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, info->cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
+ set_64bit_val(wqe, 32, info->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_check_cqp_progress - check cqp processing progress
+ * @timeout: timeout info struct
+ * @dev: sc device struct
+ */
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+{
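+ /* restart the stall count whenever a command completes; only
+ * count polls while requests are still outstanding
+ */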
+ if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
+ timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ timeout->count = 0;
+ } else {
+ if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
+ timeout->compl_cqp_cmds)
+ timeout->count++;
+ }
+}
+
+/**
+ * irdma_get_cqp_reg_info - get tail and error status for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
+ u32 *tail, u32 *error)
+{
+ *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
+ *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
+}
+
+/**
+ * irdma_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
+ u32 tail, u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i++ < count) {
+ irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n",
+ error);
+ return IRDMA_ERR_CQP_COMPL_ERROR;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ return 0;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ }
+
+ return IRDMA_ERR_TIMEOUT;
+}
+
+/**
+ * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
+ * @dev: sc device struct
+ * @buf: pointer to commit buffer
+ * @buf_idx: buffer index
+ * @obj_info: object info pointer
+ * @rsrc_idx: index of memory resource
+ */
+static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
+ u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+
+ get_64bit_val(buf, buf_idx, &temp);
+
+ switch (rsrc_idx) {
+ case IRDMA_HMC_IW_QP:
+ obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
+ break;
+ case IRDMA_HMC_IW_CQ:
+ obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
+ break;
+ case IRDMA_HMC_IW_APBVT_ENTRY:
+ obj_info[rsrc_idx].cnt = 1;
+ break;
+ default:
+ obj_info[rsrc_idx].cnt = (u32)temp;
+ break;
+ }
+
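+ /* the object base is reported in units of 512 bytes */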
+ obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @dev: pointer to dev struct
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to irdma_hmc_obj_info struct
+ * @sd: number of SDs for HMC objects
+ *
+ * parses fpm commit info and copies the base values
+ * of the hmc objects into hmc_info
+ */
+static enum irdma_status_code
+irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_obj_info *info, u32 *sd)
+{
+ u64 size;
+ u32 i;
+ u64 max_base = 0;
+ u32 last_hmc_obj = 0;
+
+ irdma_sc_decode_fpm_commit(dev, buf, 0, info,
+ IRDMA_HMC_IW_QP);
+ irdma_sc_decode_fpm_commit(dev, buf, 8, info,
+ IRDMA_HMC_IW_CQ);
+ /* skipping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, 24, info,
+ IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_commit(dev, buf, 32, info,
+ IRDMA_HMC_IW_ARP);
+ irdma_sc_decode_fpm_commit(dev, buf, 40, info,
+ IRDMA_HMC_IW_APBVT_ENTRY);
+ irdma_sc_decode_fpm_commit(dev, buf, 48, info,
+ IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_commit(dev, buf, 56, info,
+ IRDMA_HMC_IW_XF);
+ irdma_sc_decode_fpm_commit(dev, buf, 64, info,
+ IRDMA_HMC_IW_XFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 72, info,
+ IRDMA_HMC_IW_Q1);
+ irdma_sc_decode_fpm_commit(dev, buf, 80, info,
+ IRDMA_HMC_IW_Q1FL);
+ irdma_sc_decode_fpm_commit(dev, buf, 88, info,
+ IRDMA_HMC_IW_TIMER);
+ irdma_sc_decode_fpm_commit(dev, buf, 112, info,
+ IRDMA_HMC_IW_PBLE);
+ /* skipping RSVD. */
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ irdma_sc_decode_fpm_commit(dev, buf, 96, info,
+ IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_commit(dev, buf, 104, info,
+ IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_commit(dev, buf, 128, info,
+ IRDMA_HMC_IW_RRF);
+ irdma_sc_decode_fpm_commit(dev, buf, 136, info,
+ IRDMA_HMC_IW_RRFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 144, info,
+ IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_commit(dev, buf, 152, info,
+ IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
+
+ /* searching for the last object in HMC to find the size of the HMC area. */
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (info[i].base > max_base) {
+ max_base = info[i].base;
+ last_hmc_obj = i;
+ }
+ }
+
+ size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
+ info[last_hmc_obj].base;
+
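+ /* each segment descriptor (sd) maps 2MB, so round up to whole sds */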
+ if (size & 0x1FFFFF)
+ *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
+ else
+ *sd = (u32)(size >> 21);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to irdma_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
+ struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
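+ /* the upper 32 bits hold log2 of the object size */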
+ size = (u32)(temp >> 32);
+ obj_info[rsrc_idx].size = BIT_ULL(size);
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @dev: ptr to shared code device
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copies the max_cnt and
+ * size values of the hmc objects into hmc_info
+ */
+static enum irdma_status_code
+irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ struct irdma_hmc_obj_info *obj_info;
+ u64 temp;
+ u32 size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, 0, &temp);
+ hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
+
+ get_64bit_val(buf, 16, &temp);
+ obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+
+ irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
+
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
+
+ get_64bit_val(buf, 64, &temp);
+ obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_XFFL].size = 4;
+ hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->xf_block_size)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
+ get_64bit_val(buf, 80, &temp);
+ obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
+
+ hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
+ if (!hmc_fpm_misc->q1_block_size)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
+ hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
+ hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return 0;
+ irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
+
+ get_64bit_val(buf, 136, &temp);
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
+ hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->rrf_block_size &&
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_find_reg_cq - find cq ctx index
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq)
+{
+ u32 i;
+
+ for (i = 0; i < ceq->reg_cq_size; i++) {
+ if (cq == ceq->reg_cq[i])
+ return i;
+ }
+
+ return IRDMA_INVALID_CQ_IDX;
+}
+
+/**
+ * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+
+ if (ceq->reg_cq_size == ceq->elem_cnt) {
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ return IRDMA_ERR_REG_CQ_FULL;
+ }
+
+ ceq->reg_cq[ceq->reg_cq_size++] = cq;
+
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+ u32 cq_ctx_idx;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
+ if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
+ goto exit;
+
+ ceq->reg_cq_size--;
+ if (cq_ctx_idx != ceq->reg_cq_size)
+ ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
+ ceq->reg_cq[ceq->reg_cq_size] = NULL;
+
+exit:
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+}
+
+/**
+ * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
+{
+ u8 hw_sq_size;
+
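+ /* the sq size must be a power of two between 4 and 2048 entries */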
+ if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
+ info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return IRDMA_ERR_INVALID_SIZE;
+
+ hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
+ IRDMA_QUEUE_TYPE_CQP);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->hw_maj_ver = info->hw_maj_ver;
+ cqp->hw_min_ver = info->hw_min_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->ena_vf_count = info->ena_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ cqp->ceqs_per_vf = info->ceqs_per_vf;
+ cqp->disable_packed = info->disable_packed;
+ cqp->rocev2_rto_policy = info->rocev2_rto_policy;
+ cqp->protocol_used = info->protocol_used;
+ memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ info->dev->cqp = cqp;
+
+ IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
+ /* for the cqp commands backlog. */
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
+ (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
+ return 0;
+}
+
+/**
+ * irdma_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
+ u16 *min_err)
+{
+ u64 temp;
+ u8 hw_rev;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ enum irdma_status_code ret_code;
+
+ hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
+ cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
+ IRDMA_SD_BUF_ALIGNMENT);
+ cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
+ cqp->sdbuf.size, &cqp->sdbuf.pa,
+ GFP_KERNEL);
+ if (!cqp->sdbuf.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ spin_lock_init(&cqp->dev->cqp_lock);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
+ FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
+ FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
+ FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
+ cqp->rocev2_rto_policy) |
+ FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
+ cqp->protocol_used);
+ }
+
+ set_64bit_val(cqp->host_ctx, 0, temp);
+ set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
+ FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+ set_64bit_val(cqp->host_ctx, 16, temp);
+ set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+ temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
+ FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
+ FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 32, temp);
+ set_64bit_val(cqp->host_ctx, 40, 0);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
+ FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
+ FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 48, temp);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
+ FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
+ FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
+ FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
+ }
+ set_64bit_val(cqp->host_ctx, 56, temp);
+ print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
+ p1 = cqp->host_ctx_pa >> 32;
+ p2 = (u32)cqp->host_ctx_pa;
+
+ writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = IRDMA_ERR_TIMEOUT;
+ goto err;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (!val);
+
+ if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
+ ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
+ goto err;
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+ return 0;
+
+err:
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
+ *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_cqp_post_sq - post to cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
+{
+ writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+}
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq and pass back index
+ * @cqp: CQP HW structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index of CQP SQ
+ */
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx)
+{
+ __le64 *wqe = NULL;
+ enum irdma_status_code ret_code;
+
+ if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
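+ /* a wqe_idx of 0 means the ring wrapped, so flip the valid-bit polarity */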
+ if (!*wqe_idx)
+ cqp->polarity = !cqp->polarity;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
+ IRDMA_CQP_INIT_WQE(wqe);
+
+ return wqe;
+}
+
+/**
+ * irdma_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+{
+ u32 cnt = 0, val;
+ enum irdma_status_code ret_code = 0;
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = IRDMA_ERR_TIMEOUT;
+ break;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
+
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
+ get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
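+ /* bump the arm sequence number so hw sees a fresh arm request */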
+ arm_seq_num++;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
+ set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+ dma_wmb(); /* make sure shadow area is updated before arming */
+
+ writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
+}
+
+/**
+ * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
+{
+ u64 qp_ctx, temp, temp1;
+ __le64 *cqe;
+ struct irdma_sc_cqp *cqp;
+ u32 wqe_idx;
+ u32 error;
+ u8 polarity;
+ enum irdma_status_code ret_code = 0;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
+
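+	/* A CQE is consumable only while its valid bit matches the ring's
+	 * current polarity; a mismatch means the CCQ is empty.
+	 */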
+ get_64bit_val(cqe, 24, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
+ if (polarity != ccq->cq_uk.polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ get_64bit_val(cqe, 8, &qp_ctx);
+ cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
+ info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
+ info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
+ if (info->error) {
+ info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n", error);
+ }
+
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, 16, &temp1);
+ info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+ info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
+
+ dma_wmb(); /* make sure shadow area is updated before moving tail */
+
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @compl_info: completion q entry to return
+ */
+enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
+{
+ struct irdma_ccq_cqe_info info = {};
+ struct irdma_sc_cq *ccq;
+ enum irdma_status_code ret_code = 0;
+ u32 cnt = 0;
+
+ ccq = cqp->dev->ccq;
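+	/* Busy-poll the CCQ for our completion, bounded at 100x the normal
+	 * register-poll budget; completions with a different opcode are
+	 * logged and skipped.
+	 */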
+ while (1) {
+ if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
+ return IRDMA_ERR_TIMEOUT;
+
+ if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ continue;
+ }
+ if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
+ ret_code = IRDMA_ERR_CQP_COMPL_ERROR;
+ break;
+ }
+		/* make sure op code matches */
+ if (op_code == info.op_code)
+ break;
+ ibdev_dbg(to_ibdev(cqp->dev),
+			  "WQE: opcode mismatch, expected 0x%x, returned 0x%x\n",
+ op_code, info.op_code);
+ }
+
+ if (compl_info)
+ memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_manage_hmc_pm_func_table - manage hmc pm function table
+ * @cqp: struct for cqp hw
+ * @info: info for the manage function table operation
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code
+irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56, 0);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
+ * for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static enum irdma_status_code
+irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: Memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem, bool post_sq,
+ u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ enum irdma_status_code ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_commit_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
+ * query fpm
+ * @cqp: struct for cqp hw
+ */
+static enum irdma_status_code
+irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_fpm_val - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem, bool post_sq,
+ u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ enum irdma_status_code ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_query_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
+ return IRDMA_ERR_INVALID_CEQ_ID;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+ ceq->itr_no_expire = info->itr_no_expire;
+ ceq->reg_cq = info->reg_cq;
+ ceq->reg_cq_size = 0;
+ spin_lock_init(&ceq->req_cq_lock);
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->vsi = info->vsi;
+ ceq->polarity = 1;
+ IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+
+static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static enum irdma_status_code
+irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
+ * @ceq: ceq sc structure
+ */
+enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ if (ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+{
+ enum irdma_status_code ret_code;
+ struct irdma_sc_dev *dev = ceq->dev;
+
+ dev->ccq->vsi = ceq->vsi;
+ if (ceq->reg_cq) {
+ ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
+ if (ret_code)
+ return ret_code;
+ }
+
+ ret_code = irdma_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ return irdma_sc_cceq_create_done(ceq);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ hdr = ceq->ceq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ *
+ * The caller is expected to serialize this function with cleanup_ceqes(),
+ * as both functions manipulate the same ceq
+ */
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
+{
+ u64 temp;
+ __le64 *ceqe;
+ struct irdma_sc_cq *cq = NULL;
+ struct irdma_sc_cq *temp_cq;
+ u8 polarity;
+ u32 cq_idx;
+ unsigned long flags;
+
+ do {
+ cq_idx = 0;
+ ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq->polarity)
+ return NULL;
+
+ temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (!temp_cq) {
+ cq_idx = IRDMA_INVALID_CQ_IDX;
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ continue;
+ }
+
+ cq = temp_cq;
+ if (ceq->reg_cq) {
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_idx = irdma_sc_find_reg_cq(ceq, cq);
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ }
+
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ } while (cq_idx == IRDMA_INVALID_CQ_IDX);
+
+ if (cq)
+ irdma_sc_cq_ack(cq);
+ return cq;
+}
+
+/**
+ * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
+ * @cq: cq for which the ceqes need to be cleaned up
+ * @ceq: ceq ptr
+ *
+ * This function is called after the cq is destroyed to clean up
+ * its pending ceqe entries. The caller is expected to serialize it
+ * with process_ceq(), which runs in interrupt context.
+ */
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cq *next_cq;
+ u8 ceq_polarity = ceq->polarity;
+ __le64 *ceqe;
+ u8 polarity;
+ u64 temp;
+ int next;
+ u32 i;
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
+
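+	/* Scan all potentially valid CEQEs; any entry pointing at the
+	 * destroyed cq keeps its valid bit but loses its cq context, so
+	 * process_ceq() treats it as a null entry and skips it.
+	 */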
+ for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
+ ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
+
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq_polarity)
+ return;
+
+ next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (cq == next_cq)
+ set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
+ if (!next)
+ ceq_polarity ^= 1;
+ }
+}
+
+/**
+ * irdma_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ aeq->msix_idx = info->msix_idx;
+ info->dev->aeq = aeq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = aeq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_dev *dev;
+ u64 hdr;
+
+ dev = aeq->dev;
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ __le64 *aeqe;
+ u16 wqe_idx;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
+ get_64bit_val(aeqe, 0, &compl_ctx);
+ get_64bit_val(aeqe, 8, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
+
+ if (aeq->polarity != polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ aeqe, 16, false);
+
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
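+	/* The QP/CQ id is split across two AEQE fields; recombine the low
+	 * 18 bits with the high bits.
+	 */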
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
+
+ info->ae_src = ae_src;
+ switch (info->ae_id) {
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
+ case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
+ case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
+ case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_INVALID_AH_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_RESET_SENT:
+ case IRDMA_AE_TERMINATE_SENT:
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_ROCE_EMPTY_MCG:
+ case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
+ case IRDMA_AE_ROCE_BAD_MC_QPID:
+ case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
+ fallthrough;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ default:
+ break;
+ }
+
+ switch (ae_src) {
+ case IRDMA_AE_SOURCE_RQ:
+ case IRDMA_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->rq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_CQ:
+ case IRDMA_AE_SOURCE_CQ_0110:
+ case IRDMA_AE_SOURCE_CQ_1010:
+ case IRDMA_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ break;
+ case IRDMA_AE_SOURCE_SQ:
+ case IRDMA_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_IN_RR_WR:
+ case IRDMA_AE_SOURCE_IN_RR_WR_1011:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case IRDMA_AE_SOURCE_OUT_RR:
+ case IRDMA_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ case IRDMA_AE_SOURCE_RSVD:
+ default:
+ break;
+ }
+
+ IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
+ aeq->polarity ^= 1;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+{
+ writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
+ struct irdma_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
+ info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
+ return IRDMA_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return IRDMA_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = IRDMA_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+ cq->vsi = info->vsi;
+ cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
+
+ /* Only applicable to CQs other than CCQ so initialize to zero */
+ cq->cq_uk.cqe_alloc_db = NULL;
+
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * irdma_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ enum irdma_status_code ret_code;
+
+ ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
+ if (ret_code)
+ return ret_code;
+
+ if (post_sq) {
+ ret_code = irdma_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
+ ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ enum irdma_status_code ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
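+	/* cq contexts are stored shifted right by one; readers such as
+	 * process_ceq() recover the pointer with a << 1.
+	 */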
+ set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+ hdr = ccq->cq_uk.cq_id |
+ FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to irdma_sc_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_dma_mem query_fpm_mem;
+ enum irdma_status_code ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = dev->fpm_query_buf;
+ hmc_info->hmc_fn_id = hmc_fn_id;
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+
+ ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &query_fpm_mem, true, wait_type);
+ if (ret_code)
+ return ret_code;
+
+ /* parse the fpm_query_buf and fill hmc obj info */
+ ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
+ hmc_fpm_misc);
+
+ print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
+ false);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
+ * command and populates fpm base address in hmc_info
+ * @dev: ptr to irdma_sc_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_obj_info *obj_info;
+ __le64 *buf;
+ struct irdma_dma_mem commit_fpm_mem;
+ enum irdma_status_code ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ obj_info = hmc_info->hmc_obj;
+ buf = dev->fpm_commit_buf;
+
+ set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
+ set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
+ set_64bit_val(buf, 16, (u64)0); /* RSRVD */
+ set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
+ set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
+ set_64bit_val(buf, 40, (u64)0); /* RSVD */
+ set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
+ set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
+ set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
+ set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
+ set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
+ set_64bit_val(buf, 88,
+ (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
+ set_64bit_val(buf, 96,
+ (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
+ set_64bit_val(buf, 104,
+ (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
+ set_64bit_val(buf, 112,
+ (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
+ set_64bit_val(buf, 120, (u64)0); /* RSVD */
+ set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
+ set_64bit_val(buf, 136,
+ (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
+ set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
+ set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
+ set_64bit_val(buf, 160,
+ (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
+ set_64bit_val(buf, 168,
+ (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
+
+ commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+ commit_fpm_mem.va = dev->fpm_commit_buf;
+
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+ ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &commit_fpm_mem, true, wait_type);
+ if (!ret_code)
+ ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+
+ return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
+ u64 scratch)
+{
+ u64 data;
+ u64 hdr;
+ __le64 *wqe;
+ int mem_entries, wqe_entries;
+ struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
+ u64 offset = 0;
+ u32 wqe_idx;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
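+	/* Up to three SD entries are carried inline in the WQE itself;
+	 * any remainder is staged in this WQE's slot of the sdbuf DMA
+	 * area (16 bytes per entry, hence mem_entries << 4).
+	 */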
+ wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+ mem_entries = info->cnt - wqe_entries;
+
+ if (mem_entries) {
+ offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
+ memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
+
+ data = (u64)sdbuf->pa + offset;
+ } else {
+ data = 0;
+ }
+ data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
+ set_64bit_val(wqe, 16, data);
+
+ switch (wqe_entries) {
+ case 3:
+ set_64bit_val(wqe, 48,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 56, info->entry[2].data);
+ fallthrough;
+ case 2:
+ set_64bit_val(wqe, 32,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 40, info->entry[1].data);
+ fallthrough;
+ case 1:
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
+
+ set_64bit_val(wqe, 8, info->entry[0].data);
+ break;
+ default:
+ break;
+ }
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (mem_entries)
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
+ DUMP_PREFIX_OFFSET, 16, 8,
+ (char *)sdbuf->va + offset,
+ mem_entries << 4, false);
+
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ return 0;
+}
+
+/**
+ * irdma_update_pe_sds - cqp wqe for sd
+ * @dev: ptr to irdma_dev struct
+ * @info: sd info for the SDs
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+irdma_update_pe_sds(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info, u64 scratch)
+{
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ enum irdma_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+ if (!ret_code)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return ret_code;
+}
+
+/**
+ * irdma_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for the SDs
+ */
+enum irdma_status_code
+irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info)
+{
+ u32 error, val, tail;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ enum irdma_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+ if (ret_code)
+ return ret_code;
+
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+}
+
+/**
+ * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+enum irdma_status_code
+irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 tail, val, error;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ /* check for cqp sq tail update */
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else
+ return irdma_sc_poll_for_cqp_op_done(cqp,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cqp_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
+{
+ return IRDMA_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * irdma_est_sd - returns approximate number of SDs for HMC
+ * @dev: sc device struct
+ * @hmc_info: hmc structure, size and count for HMC objects
+ */
+static u32 irdma_est_sd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ int i;
+ u64 size = 0;
+ u64 sd;
+
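+	/* Sum the 512-byte-aligned footprint of every HMC object, then
+	 * convert to 2MB segment descriptors (hence the shift by 21),
+	 * rounding up for any remainder.
+	 */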
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ if (i != IRDMA_HMC_IW_PBLE)
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+ if (size & 0x1FFFFF)
+ sd = (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd = size >> 21;
+ if (sd > 0xFFFFFFFF) {
+ ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
+ sd = 0xFFFFFFFF - 1;
+ }
+
+ return (u32)sd;
+}
+
+/**
+ * irdma_sc_query_rdma_features_done - poll cqp for query features done
+ * @cqp: struct for cqp hw
+ */
+static enum irdma_status_code
+irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp,
+ IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_rdma_features - query RDMA features and FW ver
+ * @cqp: struct for cqp hw
+ * @buf: buffer to hold query info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum irdma_status_code
+irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
+ struct irdma_dma_mem *buf, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ temp = buf->pa;
+ set_64bit_val(wqe, 32, temp);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
+ cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_get_rdma_features - get RDMA features
+ * @dev: sc device struct
+ */
+enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
+{
+ enum irdma_status_code ret_code;
+ struct irdma_dma_mem feat_buf;
+ u64 temp;
+ u16 byte_idx, feat_type, feat_cnt, feat_idx;
+
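+	/* Query once with the default buffer; if the device reports more
+	 * features than fit, reallocate at 8 bytes per feature and retry.
+	 */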
+ feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
+ &feat_buf.pa, GFP_KERNEL);
+ if (!feat_buf.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ goto exit;
+ } else if (feat_cnt > IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: feature buf size insufficient, retrying with larger buffer\n");
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ feat_buf.size = ALIGN(8 * feat_cnt,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device,
+ feat_buf.size, &feat_buf.pa,
+ GFP_KERNEL);
+ if (!feat_buf.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ goto exit;
+ }
+ }
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, feat_buf.va, feat_cnt * 8, false);
+
+ for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
+ feat_idx++, byte_idx += 8) {
+ get_64bit_val(feat_buf.va, byte_idx, &temp);
+ feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
+ if (feat_type >= IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: found unrecognized feature type %d\n",
+ feat_type);
+ continue;
+ }
+ dev->feature_info[feat_type] = temp;
+ }
+exit:
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ return ret_code;
+}
+
+static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ u32 q1_cnt;
+
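+	/* Q1 scales with 2 * max IRD per wanted QP. GEN_1 always rounds to a
+	 * power of two; later gens only do so (plus 512 slack) when a RoCE
+	 * capable protocol is in use.
+	 */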
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
+ } else {
+ if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
+ else
+ q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
+ }
+
+ return q1_cnt;
+}
+
+static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
+}
+
+static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ 4 * hmc_fpm_misc->xf_block_size * qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
+ hmc_fpm_misc->ooiscf_block_size;
+}
+
+/**
+ * irdma_cfg_fpm_val - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 i, mem_size;
+ u32 qpwanted, mrwanted, pblewanted;
+ u32 powerof2, hte;
+ u32 sd_needed;
+ u32 sd_diff;
+ u32 loop_count = 0;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ enum irdma_status_code ret_code = 0;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
+ sd_needed, hmc_info->first_sd_index);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
+ hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
+
+ qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
+
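+	/* Round the requested qp count down to a power of two */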
+ powerof2 = 1;
+ while (powerof2 <= qpwanted)
+ powerof2 *= 2;
+ powerof2 /= 2;
+ qpwanted = powerof2;
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
+ qp_count, hmc_fpm_misc->max_sds,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
+
+ while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
+ qpwanted /= 2;
+
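+	/* Iteratively shrink the resource ask (QPs, MRs, PBLEs, MC groups,
+	 * AVs) until the estimated SD count fits in max_sds, giving up
+	 * after 2000 passes.
+	 */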
+ do {
+ ++loop_count;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+
+ hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
+ powerof2 = 1;
+ while (powerof2 < hte)
+ powerof2 *= 2;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
+ powerof2 * hmc_fpm_misc->ht_multiplier;
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
+ else
+ cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
+ sd_needed, hmc_fpm_misc->max_sds, mrwanted,
+ pblewanted, qpwanted);
+
+ /* Do not reduce resources further. All objects fit with max SDs */
+ if (sd_needed <= hmc_fpm_misc->max_sds)
+ break;
+
+ sd_diff = sd_needed - hmc_fpm_misc->max_sds;
+ if (sd_diff > 128) {
+ if (qpwanted > 128 && sd_diff > 144)
+ qpwanted /= 2;
+ mrwanted /= 2;
+ pblewanted /= 2;
+ continue;
+ }
+ if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
+ pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
+ pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
+ continue;
+ } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
+ pblewanted -= 10 * FPM_MULTIPLIER;
+ } else if (pblewanted > FPM_MULTIPLIER) {
+ pblewanted -= FPM_MULTIPLIER;
+ } else if (qpwanted <= 128) {
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ if (mrwanted > FPM_MULTIPLIER)
+ mrwanted -= FPM_MULTIPLIER;
+ if (!(loop_count % 10) && qpwanted > 128) {
+ qpwanted /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ } while (loop_count < 2000);
+
+ if (sd_needed > hmc_fpm_misc->max_sds) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
+ return IRDMA_ERR_CFG;
+ }
+
+ if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
+ pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
+ FPM_MULTIPLIER;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ }
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
+ loop_count, sd_needed,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
+ hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
+
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+ return ret_code;
+ }
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: failed to allocate memory for sd_entry buffer\n");
+ return IRDMA_ERR_NO_MEMORY;
+ }
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return ret_code;
+}
+
+/**
+ * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ enum irdma_status_code status;
+ struct irdma_dma_mem val_mem;
+ bool alloc = false;
+
+ dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+ switch (pcmdinfo->cqp_cmd) {
+ case IRDMA_OP_CEQ_DESTROY:
+ status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+ pcmdinfo->in.u.ceq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_DESTROY:
+ status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+ pcmdinfo->in.u.aeq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case IRDMA_OP_CEQ_CREATE:
+ status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+ pcmdinfo->in.u.ceq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_CREATE:
+ status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+ pcmdinfo->in.u.aeq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_UPLOAD_CONTEXT:
+ status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
+ &pcmdinfo->in.u.qp_upload_context.info,
+ pcmdinfo->in.u.qp_upload_context.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_CREATE:
+ status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
+ pcmdinfo->in.u.cq_create.scratch,
+ pcmdinfo->in.u.cq_create.check_overflow,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_MODIFY:
+ status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
+ &pcmdinfo->in.u.cq_modify.info,
+ pcmdinfo->in.u.cq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_DESTROY:
+ status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
+ pcmdinfo->in.u.cq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_FLUSH_WQES:
+ status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
+ &pcmdinfo->in.u.qp_flush_wqes.info,
+ pcmdinfo->in.u.qp_flush_wqes.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_GEN_AE:
+ status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
+ &pcmdinfo->in.u.gen_ae.info,
+ pcmdinfo->in.u.gen_ae.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_PUSH_PAGE:
+ status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
+ &pcmdinfo->in.u.manage_push_page.info,
+ pcmdinfo->in.u.manage_push_page.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_UPDATE_PE_SDS:
+ status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
+ &pcmdinfo->in.u.update_pe_sds.info,
+ pcmdinfo->in.u.update_pe_sds.scratch);
+ break;
+ case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
+ /* switch to calling through the call table */
+ status =
+ irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+ &pcmdinfo->in.u.manage_hmc_pm.info,
+ pcmdinfo->in.u.manage_hmc_pm.scratch,
+ true);
+ break;
+ case IRDMA_OP_SUSPEND:
+ status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_RESUME:
+ status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_QUERY_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
+ status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
+ pcmdinfo->in.u.query_fpm_val.scratch,
+ pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
+ &val_mem, true, IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_COMMIT_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
+ status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
+ pcmdinfo->in.u.commit_fpm_val.scratch,
+ pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
+ &val_mem,
+ true,
+ IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_STATS_ALLOCATE:
+ alloc = true;
+ fallthrough;
+ case IRDMA_OP_STATS_FREE:
+ status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
+ &pcmdinfo->in.u.stats_manage.info,
+ alloc,
+ pcmdinfo->in.u.stats_manage.scratch);
+ break;
+ case IRDMA_OP_STATS_GATHER:
+ status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
+ &pcmdinfo->in.u.stats_gather.info,
+ pcmdinfo->in.u.stats_gather.scratch);
+ break;
+ case IRDMA_OP_WS_MODIFY_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_MODIFY_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_DELETE_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_DEL_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_ADD_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_ADD_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_SET_UP_MAP:
+ status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
+ &pcmdinfo->in.u.up_map.info,
+ pcmdinfo->in.u.up_map.scratch);
+ break;
+ case IRDMA_OP_QUERY_RDMA_FEATURES:
+ status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
+ &pcmdinfo->in.u.query_rdma.query_buff_mem,
+ pcmdinfo->in.u.query_rdma.scratch);
+ break;
+ case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
+ status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
+ pcmdinfo->in.u.del_arp_cache_entry.scratch,
+ pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_APBVT_ENTRY:
+ status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
+ &pcmdinfo->in.u.manage_apbvt_entry.info,
+ pcmdinfo->in.u.manage_apbvt_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
+ status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+ &pcmdinfo->in.u.manage_qhash_table_entry.info,
+ pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_MODIFY:
+ status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
+ &pcmdinfo->in.u.qp_modify.info,
+ pcmdinfo->in.u.qp_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_CREATE:
+ status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
+ &pcmdinfo->in.u.qp_create.info,
+ pcmdinfo->in.u.qp_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_DESTROY:
+ status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
+ pcmdinfo->in.u.qp_destroy.scratch,
+ pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+ pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_STAG:
+ status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
+ &pcmdinfo->in.u.alloc_stag.info,
+ pcmdinfo->in.u.alloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MR_REG_NON_SHARED:
+ status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
+ &pcmdinfo->in.u.mr_reg_non_shared.info,
+ pcmdinfo->in.u.mr_reg_non_shared.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DEALLOC_STAG:
+ status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
+ &pcmdinfo->in.u.dealloc_stag.info,
+ pcmdinfo->in.u.dealloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MW_ALLOC:
+ status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
+ &pcmdinfo->in.u.mw_alloc.info,
+ pcmdinfo->in.u.mw_alloc.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
+ status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
+ &pcmdinfo->in.u.add_arp_cache_entry.info,
+ pcmdinfo->in.u.add_arp_cache_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
+ status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
+ pcmdinfo->in.u.alloc_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
+ status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
+ &pcmdinfo->in.u.add_local_mac_entry.info,
+ pcmdinfo->in.u.add_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
+ status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
+ pcmdinfo->in.u.del_local_mac_entry.scratch,
+ pcmdinfo->in.u.del_local_mac_entry.entry_idx,
+ pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AH_CREATE:
+ status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
+ &pcmdinfo->in.u.ah_create.info,
+ pcmdinfo->in.u.ah_create.scratch);
+ break;
+ case IRDMA_OP_AH_DESTROY:
+ status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
+ &pcmdinfo->in.u.ah_destroy.info,
+ pcmdinfo->in.u.ah_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_CREATE:
+ status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
+ &pcmdinfo->in.u.mc_create.info,
+ pcmdinfo->in.u.mc_create.scratch);
+ break;
+ case IRDMA_OP_MC_DESTROY:
+ status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
+ &pcmdinfo->in.u.mc_destroy.info,
+ pcmdinfo->in.u.mc_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_MODIFY:
+ status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
+ &pcmdinfo->in.u.mc_modify.info,
+ pcmdinfo->in.u.mc_modify.scratch);
+ break;
+ default:
+ status = IRDMA_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_process_cqp_cmd - process a cqp command, or queue it if CQP is busy
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ enum irdma_status_code status = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ else
+ list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * irdma_process_bh - drain the backlogged cqp command list; called from tasklet
+ * @dev: sc device struct
+ */
+enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev)
+{
+ enum irdma_status_code status = 0;
+ struct cqp_cmds_info *pcmdinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ while (!list_empty(&dev->cqp_cmd_head) &&
+ !irdma_cqp_ring_full(dev->cqp)) {
+ pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ if (status)
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * irdma_cfg_aeq - Configure AEQ interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ * @enable: true to enable, false to disable
+ */
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
+ writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+}
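+
+/*
+ * Usage sketch: irdma_cfg_aeq(dev, 0, true) enables the AEQ interrupt
+ * cause and routes it to MSI-X vector 0 through the PFINT_AEQCTL register.
+ */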
+
+/**
+ * sc_vsi_update_stats - Update statistics
+ * @vsi: sc_vsi instance to update
+ */
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_gather_stats *gather_stats;
+ struct irdma_gather_stats *last_gather_stats;
+
+ gather_stats = vsi->pestat->gather_info.gather_stats_va;
+ last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
+ irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
+ last_gather_stats);
+}
+
+/**
+ * irdma_wait_pe_ready - Check if firmware is ready
+ * @dev: provides access to registers
+ */
+static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
+{
+ u32 statuscpu0;
+ u32 statuscpu1;
+ u32 statuscpu2;
+ u32 retrycount = 0;
+
+ do {
+ statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
+ statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
+ statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
+ if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
+ statuscpu2 == 0x80)
+ return 0;
+ mdelay(1000);
+ } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
+ return -1;
+}
+
+static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
+{
+ switch (dev->hw_attrs.uk_attrs.hw_rev) {
+ case IRDMA_GEN_1:
+ i40iw_init_hw(dev);
+ break;
+ case IRDMA_GEN_2:
+ icrdma_init_hw(dev);
+ break;
+ }
+}
+
+/**
+ * irdma_sc_dev_init - Initialize control part of device
+ * @ver: version
+ * @dev: Device pointer
+ * @info: Device init info
+ */
+enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
+ struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info)
+{
+ u32 val;
+ enum irdma_status_code ret_code = 0;
+ u8 db_size;
+
+ INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
+ mutex_init(&dev->ws_mutex);
+ dev->hmc_fn_id = info->hmc_fn_id;
+ dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+ dev->fpm_query_buf = info->fpm_query_buf;
+ dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+ dev->fpm_commit_buf = info->fpm_commit_buf;
+ dev->hw = info->hw;
+ dev->hw->hw_addr = info->bar0;
+ /* Setup the hardware limits, hmc may limit further */
+ dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
+ dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
+ dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
+
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
+ dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
+
+ dev->hw_attrs.max_pe_ready_count = 14;
+ dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
+ dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
+ dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
+
+ dev->hw_attrs.uk_attrs.hw_rev = ver;
+ irdma_sc_init_hw(dev);
+
+ if (irdma_wait_pe_ready(dev))
+ return IRDMA_ERR_TIMEOUT;
+
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return IRDMA_ERR_PE_DOORBELL_NOT_ENA;
+ }
+ dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ return ret_code;
+}
+
+/**
+ * irdma_update_stats - Update statistics
+ * @hw_stats: hw_stats instance to update
+ * @gather_stats: updated stat counters
+ * @last_gather_stats: last stat counters
+ */
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats)
+{
+ u64 *stats_val = hw_stats->stats_val_32;
+
+ stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] +=
+ IRDMA_STATS_DELTA(gather_stats->rxvlanerr,
+ last_gather_stats->rxvlanerr,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard,
+ last_gather_stats->ip4rxdiscard,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc,
+ last_gather_stats->ip4rxtrunc,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4txnoroute,
+ last_gather_stats->ip4txnoroute,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard,
+ last_gather_stats->ip6rxdiscard,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc,
+ last_gather_stats->ip6rxtrunc,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6txnoroute,
+ last_gather_stats->ip6txnoroute,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] +=
+ IRDMA_STATS_DELTA(gather_stats->tcprtxseg,
+ last_gather_stats->tcprtxseg,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] +=
+ IRDMA_STATS_DELTA(gather_stats->tcprxopterr,
+ last_gather_stats->tcprxopterr,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] +=
+ IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr,
+ last_gather_stats->tcprxprotoerr,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] +=
+ IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled,
+ last_gather_stats->rxrpcnphandled,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] +=
+ IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored,
+ last_gather_stats->rxrpcnpignored,
+ IRDMA_MAX_STATS_32);
+ stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] +=
+ IRDMA_STATS_DELTA(gather_stats->txnpcnpsent,
+ last_gather_stats->txnpcnpsent,
+ IRDMA_MAX_STATS_32);
+ stats_val = hw_stats->stats_val_64;
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxocts,
+ last_gather_stats->ip4rxocts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxpkts,
+ last_gather_stats->ip4rxpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxfrags,
+ last_gather_stats->ip4rxfrags,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts,
+ last_gather_stats->ip4rxmcpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4txocts,
+ last_gather_stats->ip4txocts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4txpkts,
+ last_gather_stats->ip4txpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
+ last_gather_stats->ip4txfrag,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts,
+ last_gather_stats->ip4txmcpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxocts,
+ last_gather_stats->ip6rxocts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxpkts,
+ last_gather_stats->ip6rxpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxfrags,
+ last_gather_stats->ip6rxfrags,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts,
+ last_gather_stats->ip6rxmcpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6txocts,
+ last_gather_stats->ip6txocts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6txpkts,
+ last_gather_stats->ip6txpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
+ last_gather_stats->ip6txfrags,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts,
+ last_gather_stats->ip6txmcpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] +=
+ IRDMA_STATS_DELTA(gather_stats->tcprxsegs,
+ last_gather_stats->tcprxsegs,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] +=
+ IRDMA_STATS_DELTA(gather_stats->tcptxsegs,
+ last_gather_stats->tcptxsegs,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmarxrds,
+ last_gather_stats->rdmarxrds,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmarxsnds,
+ last_gather_stats->rdmarxsnds,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmarxwrs,
+ last_gather_stats->rdmarxwrs,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmatxrds,
+ last_gather_stats->rdmatxrds,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmatxsnds,
+ last_gather_stats->rdmatxsnds,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmatxwrs,
+ last_gather_stats->rdmatxwrs,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmavbn,
+ last_gather_stats->rdmavbn,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] +=
+ IRDMA_STATS_DELTA(gather_stats->rdmavinv,
+ last_gather_stats->rdmavinv,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->udprxpkts,
+ last_gather_stats->udprxpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->udptxpkts,
+ last_gather_stats->udptxpkts,
+ IRDMA_MAX_STATS_48);
+ stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] +=
+ IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts,
+ last_gather_stats->rxnpecnmrkpkts,
+ IRDMA_MAX_STATS_48);
+ memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
+}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
new file mode 100644
index 000000000000..cc3d9a365b35
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -0,0 +1,1155 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_DEFS_H
+#define IRDMA_DEFS_H
+
+#define IRDMA_FIRST_USER_QP_ID 3
+
+#define ECN_CODE_PT_VAL 2
+
+#define IRDMA_PUSH_OFFSET (8 * 1024 * 1024)
+#define IRDMA_PF_FIRST_PUSH_PAGE_INDEX 16
+#define IRDMA_PF_BAR_RSVD (60 * 1024)
+
+#define IRDMA_PE_DB_SIZE_4M 1
+#define IRDMA_PE_DB_SIZE_8M 2
+
+#define IRDMA_IRD_HW_SIZE_4 0
+#define IRDMA_IRD_HW_SIZE_16 1
+#define IRDMA_IRD_HW_SIZE_64 2
+#define IRDMA_IRD_HW_SIZE_128 3
+#define IRDMA_IRD_HW_SIZE_256 4
+
+enum irdma_protocol_used {
+ IRDMA_ANY_PROTOCOL = 0,
+ IRDMA_IWARP_PROTOCOL_ONLY = 1,
+ IRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+#define IRDMA_QP_STATE_INVALID 0
+#define IRDMA_QP_STATE_IDLE 1
+#define IRDMA_QP_STATE_RTS 2
+#define IRDMA_QP_STATE_CLOSING 3
+#define IRDMA_QP_STATE_SQD 3
+#define IRDMA_QP_STATE_RTR 4
+#define IRDMA_QP_STATE_TERMINATE 5
+#define IRDMA_QP_STATE_ERROR 6
+
+#define IRDMA_MAX_TRAFFIC_CLASS 8
+#define IRDMA_MAX_USER_PRIORITY 8
+#define IRDMA_MAX_APPS 8
+#define IRDMA_MAX_STATS_COUNT 128
+#define IRDMA_FIRST_NON_PF_STAT 4
+
+#define IRDMA_MIN_MTU_IPV4 576
+#define IRDMA_MIN_MTU_IPV6 1280
+#define IRDMA_MTU_TO_MSS_IPV4 40
+#define IRDMA_MTU_TO_MSS_IPV6 60
+#define IRDMA_DEFAULT_MTU 1500
+
+#define Q2_FPSN_OFFSET 64
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_M 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define Q2_BAD_FRAME_OFFSET 72
+#define CQE_MAJOR_DRV 0x8000
+
+#define IRDMA_TERM_SENT 1
+#define IRDMA_TERM_RCVD 2
+#define IRDMA_TERM_DONE 4
+#define IRDMA_MAC_HLEN 14
+
+#define IRDMA_CQP_WAIT_POLL_REGS 1
+#define IRDMA_CQP_WAIT_POLL_CQ 2
+#define IRDMA_CQP_WAIT_EVENT 3
+
+#define IRDMA_AE_SOURCE_RSVD 0x0
+#define IRDMA_AE_SOURCE_RQ 0x1
+#define IRDMA_AE_SOURCE_RQ_0011 0x3
+
+#define IRDMA_AE_SOURCE_CQ 0x2
+#define IRDMA_AE_SOURCE_CQ_0110 0x6
+#define IRDMA_AE_SOURCE_CQ_1010 0xa
+#define IRDMA_AE_SOURCE_CQ_1110 0xe
+
+#define IRDMA_AE_SOURCE_SQ 0x5
+#define IRDMA_AE_SOURCE_SQ_0111 0x7
+
+#define IRDMA_AE_SOURCE_IN_RR_WR 0x9
+#define IRDMA_AE_SOURCE_IN_RR_WR_1011 0xb
+#define IRDMA_AE_SOURCE_OUT_RR 0xd
+#define IRDMA_AE_SOURCE_OUT_RR_1111 0xf
+
+#define IRDMA_TCP_STATE_NON_EXISTENT 0
+#define IRDMA_TCP_STATE_CLOSED 1
+#define IRDMA_TCP_STATE_LISTEN 2
+#define IRDMA_STATE_SYN_SEND 3
+#define IRDMA_TCP_STATE_SYN_RECEIVED 4
+#define IRDMA_TCP_STATE_ESTABLISHED 5
+#define IRDMA_TCP_STATE_CLOSE_WAIT 6
+#define IRDMA_TCP_STATE_FIN_WAIT_1 7
+#define IRDMA_TCP_STATE_CLOSING 8
+#define IRDMA_TCP_STATE_LAST_ACK 9
+#define IRDMA_TCP_STATE_FIN_WAIT_2 10
+#define IRDMA_TCP_STATE_TIME_WAIT 11
+#define IRDMA_TCP_STATE_RESERVED_1 12
+#define IRDMA_TCP_STATE_RESERVED_2 13
+#define IRDMA_TCP_STATE_RESERVED_3 14
+#define IRDMA_TCP_STATE_RESERVED_4 15
+
+#define IRDMA_CQP_SW_SQSIZE_4 4
+#define IRDMA_CQP_SW_SQSIZE_2048 2048
+
+#define IRDMA_CQ_TYPE_IWARP 1
+#define IRDMA_CQ_TYPE_ILQ 2
+#define IRDMA_CQ_TYPE_IEQ 3
+#define IRDMA_CQ_TYPE_CQP 4
+
+#define IRDMA_DONE_COUNT 1000
+#define IRDMA_SLEEP_COUNT 10
+
+#define IRDMA_UPDATE_SD_BUFF_SIZE 128
+#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+
+#define IRDMA_MAX_QUANTA_PER_WR 8
+
+#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_SQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
+#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
+ ((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
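+
+/*
+ * For example, with the default quanta limits above:
+ * IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR) = (32768 - 258) / 8 = 4063
+ * work requests per SQ.
+ */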
+
+#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
+#define IRDMAQP_TERM_SEND_TERM_ONLY 1
+#define IRDMAQP_TERM_SEND_FIN_ONLY 2
+#define IRDMAQP_TERM_DONOT_SEND_TERM_OR_FIN 3
+
+#define IRDMA_QP_TYPE_IWARP 1
+#define IRDMA_QP_TYPE_UDA 2
+#define IRDMA_QP_TYPE_ROCE_RC 3
+#define IRDMA_QP_TYPE_ROCE_UD 4
+
+#define IRDMA_HW_PAGE_SIZE 4096
+#define IRDMA_HW_PAGE_SHIFT 12
+#define IRDMA_CQE_QTYPE_RQ 0
+#define IRDMA_CQE_QTYPE_SQ 1
+
+#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs */
+#define IRDMA_QP_WQE_MIN_SIZE 32
+#define IRDMA_QP_WQE_MAX_SIZE 256
+#define IRDMA_QP_WQE_MIN_QUANTA 1
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+
+#define IRDMA_SQ_RSVD 258
+#define IRDMA_RQ_RSVD 1
+
+#define IRDMA_FEATURE_RTS_AE 1ULL
+#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMAQP_OP_RDMA_WRITE 0x00
+#define IRDMAQP_OP_RDMA_READ 0x01
+#define IRDMAQP_OP_RDMA_SEND 0x03
+#define IRDMAQP_OP_RDMA_SEND_INV 0x04
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06
+#define IRDMAQP_OP_BIND_MW 0x08
+#define IRDMAQP_OP_FAST_REGISTER 0x09
+#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a
+#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
+#define IRDMAQP_OP_NOP 0x0c
+#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_GEN_RTS_AE 0x30
+
+enum irdma_cqp_op_type {
+ IRDMA_OP_CEQ_DESTROY = 1,
+ IRDMA_OP_AEQ_DESTROY = 2,
+ IRDMA_OP_DELETE_ARP_CACHE_ENTRY = 3,
+ IRDMA_OP_MANAGE_APBVT_ENTRY = 4,
+ IRDMA_OP_CEQ_CREATE = 5,
+ IRDMA_OP_AEQ_CREATE = 6,
+ IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY = 7,
+ IRDMA_OP_QP_MODIFY = 8,
+ IRDMA_OP_QP_UPLOAD_CONTEXT = 9,
+ IRDMA_OP_CQ_CREATE = 10,
+ IRDMA_OP_CQ_DESTROY = 11,
+ IRDMA_OP_QP_CREATE = 12,
+ IRDMA_OP_QP_DESTROY = 13,
+ IRDMA_OP_ALLOC_STAG = 14,
+ IRDMA_OP_MR_REG_NON_SHARED = 15,
+ IRDMA_OP_DEALLOC_STAG = 16,
+ IRDMA_OP_MW_ALLOC = 17,
+ IRDMA_OP_QP_FLUSH_WQES = 18,
+ IRDMA_OP_ADD_ARP_CACHE_ENTRY = 19,
+ IRDMA_OP_MANAGE_PUSH_PAGE = 20,
+ IRDMA_OP_UPDATE_PE_SDS = 21,
+ IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
+ IRDMA_OP_SUSPEND = 23,
+ IRDMA_OP_RESUME = 24,
+ IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
+ IRDMA_OP_QUERY_FPM_VAL = 26,
+ IRDMA_OP_COMMIT_FPM_VAL = 27,
+ IRDMA_OP_REQ_CMDS = 28,
+ IRDMA_OP_CMPL_CMDS = 29,
+ IRDMA_OP_AH_CREATE = 30,
+ IRDMA_OP_AH_MODIFY = 31,
+ IRDMA_OP_AH_DESTROY = 32,
+ IRDMA_OP_MC_CREATE = 33,
+ IRDMA_OP_MC_DESTROY = 34,
+ IRDMA_OP_MC_MODIFY = 35,
+ IRDMA_OP_STATS_ALLOCATE = 36,
+ IRDMA_OP_STATS_FREE = 37,
+ IRDMA_OP_STATS_GATHER = 38,
+ IRDMA_OP_WS_ADD_NODE = 39,
+ IRDMA_OP_WS_MODIFY_NODE = 40,
+ IRDMA_OP_WS_DELETE_NODE = 41,
+ IRDMA_OP_WS_FAILOVER_START = 42,
+ IRDMA_OP_WS_FAILOVER_COMPLETE = 43,
+ IRDMA_OP_SET_UP_MAP = 44,
+ IRDMA_OP_GEN_AE = 45,
+ IRDMA_OP_QUERY_RDMA_FEATURES = 46,
+ IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47,
+ IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48,
+ IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
+ IRDMA_OP_CQ_MODIFY = 50,
+
+ /* Must be last entry */
+ IRDMA_MAX_CQP_OPS = 51,
+};
+
+/* CQP SQ WQES */
+#define IRDMA_CQP_OP_CREATE_QP 0
+#define IRDMA_CQP_OP_MODIFY_QP 0x1
+#define IRDMA_CQP_OP_DESTROY_QP 0x02
+#define IRDMA_CQP_OP_CREATE_CQ 0x03
+#define IRDMA_CQP_OP_MODIFY_CQ 0x04
+#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_ALLOC_STAG 0x09
+#define IRDMA_CQP_OP_REG_MR 0x0a
+#define IRDMA_CQP_OP_QUERY_STAG 0x0b
+#define IRDMA_CQP_OP_REG_SMR 0x0c
+#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
+#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
+#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
+#define IRDMA_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
+#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
+#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
+#define IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0x14
+#define IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
+#define IRDMA_CQP_OP_CREATE_CEQ 0x16
+#define IRDMA_CQP_OP_DESTROY_CEQ 0x18
+#define IRDMA_CQP_OP_CREATE_AEQ 0x19
+#define IRDMA_CQP_OP_DESTROY_AEQ 0x1b
+#define IRDMA_CQP_OP_CREATE_ADDR_HANDLE 0x1c
+#define IRDMA_CQP_OP_MODIFY_ADDR_HANDLE 0x1d
+#define IRDMA_CQP_OP_DESTROY_ADDR_HANDLE 0x1e
+#define IRDMA_CQP_OP_UPDATE_PE_SDS 0x1f
+#define IRDMA_CQP_OP_QUERY_FPM_VAL 0x20
+#define IRDMA_CQP_OP_COMMIT_FPM_VAL 0x21
+#define IRDMA_CQP_OP_FLUSH_WQES 0x22
+/* IRDMA_CQP_OP_GEN_AE is the same value as IRDMA_CQP_OP_FLUSH_WQES */
+#define IRDMA_CQP_OP_GEN_AE 0x22
+#define IRDMA_CQP_OP_MANAGE_APBVT 0x23
+#define IRDMA_CQP_OP_NOP 0x24
+#define IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define IRDMA_CQP_OP_CREATE_MCAST_GRP 0x26
+#define IRDMA_CQP_OP_MODIFY_MCAST_GRP 0x27
+#define IRDMA_CQP_OP_DESTROY_MCAST_GRP 0x28
+#define IRDMA_CQP_OP_SUSPEND_QP 0x29
+#define IRDMA_CQP_OP_RESUME_QP 0x2a
+#define IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
+#define IRDMA_CQP_OP_WORK_SCHED_NODE 0x2c
+#define IRDMA_CQP_OP_MANAGE_STATS 0x2d
+#define IRDMA_CQP_OP_GATHER_STATS 0x2e
+#define IRDMA_CQP_OP_UP_MAP 0x2f
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+
+#define FLD_LS_64(dev, val, field) \
+ (((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_64(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
+#define FLD_LS_32(dev, val, field) \
+ (((val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_32(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
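+
+/*
+ * These FLD_* helpers read shift/mask pairs from per-device tables instead
+ * of compile-time GENMASK constants. As a sketch, with hypothetical table
+ * entries hw_shifts[X_S] == 8 and hw_masks[X_M] == 0xff00:
+ * FLD_LS_64(dev, 0x12, X) == 0x1200 and FLD_RS_64(dev, 0x1200, X) == 0x12.
+ */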
+
+#define IRDMA_STATS_DELTA(a, b, c) ((a) >= (b) ? (a) - (b) : (a) + (c) - (b))
+#define IRDMA_MAX_STATS_32 0xFFFFFFFFULL
+#define IRDMA_MAX_STATS_48 0xFFFFFFFFFFFFULL
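+
+/*
+ * The gathered counters are free-running and wrap at the max values above,
+ * so IRDMA_STATS_DELTA() takes the delta across a rollover when the new
+ * sample is below the old one. With a hypothetical 8-bit counter:
+ * IRDMA_STATS_DELTA(3, 250, 0xFF) = 3 + 0xFF - 250 = 8.
+ * This assumes at most one wrap between consecutive gathers.
+ */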
+
+#define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF
+#define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
+#define IRDMA_CQPSQ_QHASH_VLANVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#define IRDMA_CQPSQ_STATS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_STATS_ALLOC_INST BIT_ULL(62)
+#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60)
+#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
+#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
+
+#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
+#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
+#define IRDMA_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59)
+#define IRDMA_CQPSQ_WS_TC GENMASK_ULL(58, 56)
+#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
+#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
+#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
+#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
+
+#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
+#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
+#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0)
+#define IRDMA_CQPHC_SQSIZE GENMASK_ULL(11, 8)
+#define IRDMA_CQPHC_DISABLE_PFPDUS BIT_ULL(1)
+#define IRDMA_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2)
+#define IRDMA_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3)
+#define IRDMA_CQPHC_MIN_RATE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_DCQCN_T GENMASK_ULL(15, 0)
+#define IRDMA_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32)
+#define IRDMA_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48)
+#define IRDMA_CQPHC_DCQCN_B GENMASK_ULL(24, 0)
+#define IRDMA_CQPHC_DCQCN_F GENMASK_ULL(27, 25)
+#define IRDMA_CQPHC_CC_CFG_VALID BIT_ULL(31)
+#define IRDMA_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32)
+#define IRDMA_CQPHC_HW_MINVER GENMASK_ULL(15, 0)
+
+#define IRDMA_CQPHC_HW_MAJVER_GEN_1 0
+#define IRDMA_CQPHC_HW_MAJVER_GEN_2 1
+#define IRDMA_CQPHC_HW_MAJVER_GEN_3 2
+#define IRDMA_CQPHC_HW_MAJVER GENMASK_ULL(31, 16)
+#define IRDMA_CQPHC_CEQPERVF GENMASK_ULL(39, 32)
+
+#define IRDMA_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32)
+
+#define IRDMA_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
+#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+
+#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
+#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
+#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
+#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
+
+/* CQP and iWARP Completion Queue */
+#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+
+#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
+#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
+#define IRDMA_CQ_EXTCQE BIT_ULL(50)
+#define IRDMA_OOO_CMPL BIT_ULL(54)
+#define IRDMA_CQ_ERROR BIT_ULL(55)
+#define IRDMA_CQ_SQ BIT_ULL(62)
+
+#define IRDMA_CQ_VALID BIT_ULL(63)
+#define IRDMA_CQ_IMMVALID BIT_ULL(62)
+#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
+#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
+#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
+#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
+
+#define IRDMA_CQ_IMMDATA_S 0
+#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMDATA_S)
+#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
+#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
+#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
+#define IRDMACQ_TCPSEQNUMRTT GENMASK_ULL(63, 32)
+#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
+#define IRDMACQ_QPID GENMASK_ULL(55, 32)
+
+#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
+#define IRDMACQ_PSHDROP BIT_ULL(51)
+#define IRDMACQ_STAG BIT_ULL(53)
+#define IRDMACQ_IPV4 BIT_ULL(53)
+#define IRDMACQ_SOEVENT BIT_ULL(54)
+#define IRDMACQ_OP GENMASK_ULL(61, 56)
+
+#define IRDMA_CEQE_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CEQE_VALID BIT_ULL(63)
+
+/* AEQE format */
+#define IRDMA_AEQE_COMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMA_AEQE_QPCQID_LOW GENMASK_ULL(17, 0)
+#define IRDMA_AEQE_QPCQID_HI BIT_ULL(46)
+#define IRDMA_AEQE_WQDESCIDX GENMASK_ULL(32, 18)
+#define IRDMA_AEQE_OVERFLOW BIT_ULL(33)
+#define IRDMA_AEQE_AECODE GENMASK_ULL(45, 34)
+#define IRDMA_AEQE_AESRC GENMASK_ULL(53, 50)
+#define IRDMA_AEQE_IWSTATE GENMASK_ULL(56, 54)
+#define IRDMA_AEQE_TCPSTATE GENMASK_ULL(60, 57)
+#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
+#define IRDMA_AEQE_VALID BIT_ULL(63)
+
+#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMA_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_VALID BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_PAYLOADLEN GENMASK_ULL(13, 0)
+#define IRDMA_UDA_HDRLEN GENMASK_ULL(24, 16)
+#define IRDMA_VLAN_TAG_VALID BIT_ULL(50)
+#define IRDMA_UDA_L3PROTO GENMASK_ULL(1, 0)
+#define IRDMA_UDA_L4PROTO GENMASK_ULL(17, 16)
+#define IRDMA_UDA_QPSQ_DOLOOPBACK BIT_ULL(44)
+#define IRDMA_CQPSQ_BUFSIZE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
+
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
+#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
+
+#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+
+/* Create/Modify/Destroy QP */
+
+#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
+#define IRDMA_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_QP_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_QP_QPID_S 0
+#define IRDMA_CQPSQ_QP_QPID_M (0xFFFFFFUL)
+
+#define IRDMA_CQPSQ_QP_OP_S 32
+#define IRDMA_CQPSQ_QP_OP_M IRDMACQ_OP_M
+#define IRDMA_CQPSQ_QP_ORDVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_QP_TOECTXVALID BIT_ULL(43)
+#define IRDMA_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44)
+#define IRDMA_CQPSQ_QP_VQ BIT_ULL(45)
+#define IRDMA_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46)
+#define IRDMA_CQPSQ_QP_CQNUMVALID BIT_ULL(47)
+#define IRDMA_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48)
+#define IRDMA_CQPSQ_QP_MACVALID BIT_ULL(51)
+#define IRDMA_CQPSQ_QP_MSSCHANGE BIT_ULL(52)
+
+#define IRDMA_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54)
+#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55)
+#define IRDMA_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56)
+#define IRDMA_CQPSQ_QP_RESETCON BIT_ULL(58)
+#define IRDMA_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60)
+
+#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
+#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+
+#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
+#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_CQ_CHKOVERFLOW BIT_ULL(46)
+#define IRDMA_CQPSQ_CQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CQ_ENCEQEMASK BIT_ULL(48)
+#define IRDMA_CQPSQ_CQ_CEQIDVALID BIT_ULL(49)
+#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT BIT_ULL(61)
+#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define IRDMA_CQPSQ_STAG_VA_FBO IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0)
+#define IRDMA_CQPSQ_STAG_KEY GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_STAG_IDX GENMASK_ULL(31, 8)
+#define IRDMA_CQPSQ_STAG_IDX_S 8
+#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32)
+#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
+#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
+#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+
+#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMA_CQPSQ_STAG_ARIGHTS GENMASK_ULL(52, 48)
+#define IRDMA_CQPSQ_STAG_REMACCENABLED BIT_ULL(53)
+#define IRDMA_CQPSQ_STAG_VABASEDTO BIT_ULL(59)
+#define IRDMA_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(60)
+#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
+
+#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
+
+#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
+#define IRDMA_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_MLM_FREEENTRY BIT_ULL(62)
+#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61)
+#define IRDMA_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8)
+#define IRDMA_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24)
+#define IRDMA_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32)
+#define IRDMA_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40)
+#define IRDMA_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0)
+#define IRDMA_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0)
+#define IRDMA_CQPSQ_MAT_ENTRYVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_MAT_PERMANENT BIT_ULL(43)
+#define IRDMA_CQPSQ_MAT_QUERY BIT_ULL(44)
+#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16)
+#define IRDMA_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62)
+#define IRDMA_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3)
+
+/* Manage Push Page - MPP */
+#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
+#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
+
+#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
+
+#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
+
+/* Upload Context - UCTX */
+#define IRDMA_CQPSQ_UCTX_QPCTXADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
+#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)
+
+#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MHMC_FREEPMFN BIT_ULL(62)
+
+#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+
+#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
+#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0)
+#define IRDMA_CQPSQ_AEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
+
+#define IRDMA_COMMIT_FPM_BASE_S 32
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
+#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48)
+#define IRDMA_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_FWQE_GENERATE_AE BIT_ULL(59)
+#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
+#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
+#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
+#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63)
+
+#define IRDMA_CQPSQ_UPESD_BM_PF 0
+#define IRDMA_CQPSQ_UPESD_BM_CP_LM 1
+#define IRDMA_CQPSQ_UPESD_BM_AXF 2
+#define IRDMA_CQPSQ_UPESD_BM_LM 4
+#define IRDMA_CQPSQ_UPESD_BM GENMASK_ULL(34, 32)
+#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7)
+#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+
+#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
+#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+
+#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
+#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
+#define IRDMA_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001
+#define IRDMA_CQPSQ_MAJ_ERROR 0xFFFF
+#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0)
+#define IRDMAQPC_IBRDENABLE BIT_ULL(2)
+#define IRDMAQPC_IPV4 BIT_ULL(3)
+#define IRDMAQPC_NONAGLE BIT_ULL(4)
+#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMAQPC_ISQP1 BIT_ULL(6)
+#define IRDMAQPC_TIMESTAMP BIT_ULL(7)
+#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8)
+#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
+#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+
+#define IRDMAQPC_ECN_EN BIT_ULL(14)
+#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
+#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
+#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19)
+#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19)
+#define IRDMAQPC_DC_TCP_EN BIT_ULL(25)
+#define IRDMAQPC_RCVTPHEN BIT_ULL(28)
+#define IRDMAQPC_XMITTPHEN BIT_ULL(29)
+#define IRDMAQPC_RQTPHEN BIT_ULL(30)
+#define IRDMAQPC_SQTPHEN BIT_ULL(31)
+#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32)
+#define IRDMAQPC_PMENA BIT_ULL(47)
+#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62)
+#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60)
+
+#define IRDMAQPC_SQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_RQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_TTL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQSIZE GENMASK_ULL(11, 8)
+#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12)
+#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16)
+#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23)
+#define IRDMAQPC_TOS GENMASK_ULL(31, 24)
+#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32)
+#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48)
+#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30)
+#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32)
+#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMAQPC_WSCALE BIT_ULL(20)
+#define IRDMAQPC_KEEPALIVE BIT_ULL(21)
+#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22)
+#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23)
+#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28)
+#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32)
+#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40)
+#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20)
+#define IRDMAQPC_PKEY GENMASK_ULL(47, 32)
+#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20)
+#define IRDMAQPC_QKEY GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0)
+#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16)
+#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24)
+#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0)
+#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_ISN GENMASK_ULL(55, 32)
+#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0)
+#define IRDMAQPC_LSN GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_EPSN GENMASK_ULL(23, 0)
+#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32)
+#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0)
+#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32)
+#define IRDMAQPC_SRTT GENMASK_ULL(31, 0)
+#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32)
+#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0)
+#define IRDMAQPC_CWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
+#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
+#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
+#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
+#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
+#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
+#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
+#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
+#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+
+#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+
+#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
+#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
+#define IRDMAQPC_RDOK BIT_ULL(21)
+#define IRDMAQPC_SNDMARKERS BIT_ULL(22)
+#define IRDMAQPC_DCQCNENABLE BIT_ULL(22)
+#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28)
+#define IRDMAQPC_RCVNOICRC BIT_ULL(31)
+#define IRDMAQPC_BINDEN BIT_ULL(23)
+#define IRDMAQPC_FASTREGEN BIT_ULL(24)
+#define IRDMAQPC_PRIVEN BIT_ULL(25)
+#define IRDMAQPC_TIMELYENABLE BIT_ULL(27)
+#define IRDMAQPC_THIGH GENMASK_ULL(63, 52)
+#define IRDMAQPC_TLOW GENMASK_ULL(39, 32)
+#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0)
+#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26)
+#define IRDMAQPC_IWARPMODE BIT_ULL(28)
+#define IRDMAQPC_RCVMARKERS BIT_ULL(29)
+#define IRDMAQPC_ALIGNHDRS BIT_ULL(30)
+#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31)
+#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32)
+#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48)
+
+#define IRDMAQPC_QPCOMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8)
+#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16)
+#define IRDMAQPC_EXCEPTION_LAN_QUEUE GENMASK_ULL(43, 32)
+#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_FW_VER_MINOR GENMASK_ULL(15, 0)
+#define IRDMA_FW_VER_MAJOR GENMASK_ULL(31, 16)
+#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
+#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
+#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+
+#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
+#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
+#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
+#define IRDMAQPSQ_READFENCE BIT_ULL(60)
+#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
+#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
+#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMAQPSQ_VALID BIT_ULL(63)
+
+#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
+#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
+#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
+#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
+
+#define IRDMA_INLINE_VALID_S 7
+#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
+#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
+
+#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
+#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
+#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
+#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
+
+#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
+#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8)
+#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43)
+#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0)
+#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(63, 48)
+#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(11, 0)
+#define IRDMAQPSQ_PBLADDR GENMASK_ULL(63, 12)
+
+/* iwarp QP RQ WQE common fields */
+#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
+#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
+#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
+#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
+#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
+
+#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
+#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
+#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
+
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
+#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
+#define IRDMA_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32)
+#define IRDMA_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(5, 0)
+
+#define IRDMA_GET_CURRENT_AEQ_ELEM(_aeq) \
+ ( \
+ (_aeq)->aeqe_base[IRDMA_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf \
+ )
+
+#define IRDMA_GET_CURRENT_CEQ_ELEM(_ceq) \
+ ( \
+ (_ceq)->ceqe_base[IRDMA_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf \
+ )
+
+#define IRDMA_GET_CEQ_ELEM_AT_POS(_ceq, _pos) \
+ ( \
+ (_ceq)->ceqe_base[_pos].buf \
+ )
+
+#define IRDMA_RING_GET_NEXT_TAIL(_ring, _idx) \
+ ( \
+ ((_ring).tail + (_idx)) % (_ring).size \
+ )
+
+#define IRDMA_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
+
+#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
+ ( \
+ (_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+#define IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \
+ ( \
+ ((struct irdma_extended_cqe *) \
+ ((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+
+#define IRDMA_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define IRDMA_RING_SIZE(_ring) ((_ring).size)
+#define IRDMA_RING_CURRENT_HEAD(_ring) ((_ring).head)
+#define IRDMA_RING_CURRENT_TAIL(_ring) ((_ring).tail)
+
+#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = IRDMA_ERR_RING_FULL; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = IRDMA_ERR_RING_FULL; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = IRDMA_ERR_RING_FULL; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = IRDMA_ERR_RING_FULL; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (_ring).head = ((_ring).head + (_count)) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define IRDMA_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define IRDMA_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
+ )
+
+#define IRDMA_ERR_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
+ )
+
+#define IRDMA_ERR_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ )
+
+#define IRDMA_SQ_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ )
+
+#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
+ )
+#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
+ )
+#define IRDMA_RING_MORE_WORK(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) != 0) \
+ )
+
+#define IRDMA_RING_USED_QUANTA(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define IRDMA_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ )
+
+#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ )
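+
+/*
+ * Ring occupancy example: with size = 8, head = 2, tail = 6,
+ * IRDMA_RING_USED_QUANTA() = (2 + 8 - 6) % 8 = 4 and
+ * IRDMA_RING_FREE_QUANTA() = 8 - 4 - 1 = 3. One slot is always left
+ * unused so that a full ring is distinguishable from an empty one.
+ */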
+
+#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = IRDMA_RING_CURRENT_HEAD(_ring); \
+ IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
+ }
+
+enum irdma_qp_wqe_size {
+ IRDMA_WQE_SIZE_32 = 32,
+ IRDMA_WQE_SIZE_64 = 64,
+ IRDMA_WQE_SIZE_96 = 96,
+ IRDMA_WQE_SIZE_128 = 128,
+ IRDMA_WQE_SIZE_256 = 256,
+};
+
+enum irdma_ws_node_op {
+ IRDMA_ADD_NODE = 0,
+ IRDMA_MODIFY_NODE,
+ IRDMA_DEL_NODE,
+};
+
+enum {
+ IRDMA_Q_ALIGNMENT_M = (128 - 1),
+ IRDMA_AEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_Q2_ALIGNMENT_M = (256 - 1),
+ IRDMA_CEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_CQ0_ALIGNMENT_M = (256 - 1),
+ IRDMA_HOST_CTX_ALIGNMENT_M = (4 - 1),
+ IRDMA_SHADOWAREA_M = (128 - 1),
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1),
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1),
+};
+
+enum irdma_alignment {
+ IRDMA_CQP_ALIGNMENT = 0x200,
+ IRDMA_AEQ_ALIGNMENT = 0x100,
+ IRDMA_CEQ_ALIGNMENT = 0x100,
+ IRDMA_CQ0_ALIGNMENT = 0x100,
+ IRDMA_SD_BUF_ALIGNMENT = 0x80,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
+};
+
+enum icrdma_protocol_used {
+ ICRDMA_ANY_PROTOCOL = 0,
+ ICRDMA_IWARP_PROTOCOL_ONLY = 1,
+ ICRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ */
+static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
+{
+ wqe_words[byte_index >> 3] = cpu_to_le64(val);
+}
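+
+/*
+ * Usage sketch: byte_index is a byte offset into the WQE, so
+ * set_64bit_val(wqe, 24, hdr) stores hdr (converted to little endian)
+ * into the fourth 64-bit word, wqe_words[24 >> 3] == wqe_words[3].
+ * Offsets are assumed to be naturally aligned for the access size.
+ */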
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ */
+static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val)
+{
+ wqe_words[byte_index >> 2] = cpu_to_le32(val);
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: read value
+ */
+static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val)
+{
+ *val = le64_to_cpu(wqe_words[byte_index >> 3]);
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: return 32 bit value
+ */
+static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val)
+{
+ *val = le32_to_cpu(wqe_words[byte_index >> 2]);
+}
+#endif /* IRDMA_DEFS_H */
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
new file mode 100644
index 000000000000..ecffcb93c05a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+
+/**
+ * irdma_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by irdma_hmc_rsrc_type.
+ */
+static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *sd_idx,
+ u32 *sd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit += 1;
+}
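+
+/*
+ * Worked example (illustrative, hypothetical numbers): with 2MB direct
+ * backing pages (IRDMA_HMC_DIRECT_BP_SIZE = 0x200000), an object area at
+ * base 0x300000 holding 1024 objects of size 0x1000 spans FPM addresses
+ * [0x300000, 0x700000), so *sd_idx = 0x300000 / 0x200000 = 1 and
+ * *sd_limit = (0x6fffff / 0x200000) + 1 = 4, i.e. SDs 1 through 3.
+ */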
+
+/**
+ * irdma_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by irdma_hmc_rsrc_type.
+ */
+static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *pd_idx,
+ u32 *pd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit += 1;
+}
+
+/**
+ * irdma_set_sd_entry - setup entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = pa |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
+ * @dev: pointer to our device struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
+ u32 pd_idx)
+{
+ u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
+
+ writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
+}
+
+/**
+ * irdma_hmc_sd_one - setup 1 sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
+ u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type,
+ bool setsd)
+{
+ struct irdma_update_sds_info sdinfo;
+
+ sdinfo.cnt = 1;
+ sdinfo.hmc_fn_id = hmc_fn_id;
+ if (setsd)
+ irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+ else
+ irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
+ return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * irdma_hmc_sd_grp - setup group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ u32 sd_index, u32 sd_cnt,
+ bool setsd)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_update_sds_info sdinfo = {};
+ u64 pa;
+ u32 i;
+ enum irdma_status_code ret_code = 0;
+
+ sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+ for (i = sd_index; i < sd_index + sd_cnt; i++) {
+ sd_entry = &hmc_info->sd_table.sd_entry[i];
+ if (!sd_entry || (!sd_entry->valid && setsd) ||
+ (sd_entry->valid && !setsd))
+ continue;
+ if (setsd) {
+ pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ irdma_set_sd_entry(pa, i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ } else {
+ irdma_clr_sd_entry(i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ }
+ sdinfo.cnt++;
+ if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_programming failed err=%d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ sdinfo.cnt = 0;
+ }
+ }
+ if (sdinfo.cnt)
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+ return ret_code;
+}
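+
+/*
+ * Worked example (illustrative, hypothetical count): programming 30 SDs
+ * with IRDMA_MAX_SD_ENTRIES = 11 makes the loop above issue two full
+ * process_cqp_sds() batches of 11 entries, and the trailing sdinfo.cnt
+ * check flushes the final batch of 8.
+ */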
+
+/**
+ * irdma_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static enum irdma_status_code
+irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+
+ if (!info->add_sd_cnt)
+ return 0;
+ return irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0], info->add_sd_cnt,
+ true);
+}
+
+/**
+ * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+enum irdma_status_code
+irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx = 0, pd_lmt = 0;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 i, j;
+ bool pd_error = false;
+ enum irdma_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
+ info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ return IRDMA_ERR_INVALID_SD_INDEX;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = sd_idx; j < sd_lmt; j++) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
+ info->entry_type,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ goto exit_sd_error;
+
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
+ (dev->hmc_info == info->hmc_info &&
+ info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
+ pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = irdma_add_pd_table_entry(dev,
+ info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ while (i && (i > pd_idx1)) {
+ irdma_remove_pd_bp(dev, info->hmc_info,
+ i - 1);
+ i--;
+ }
+ }
+ }
+ if (sd_entry->valid)
+ continue;
+
+ info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+ info->add_sd_cnt++;
+ sd_entry->valid = true;
+ }
+ return irdma_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case IRDMA_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
+ pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ irdma_prep_remove_pd_page(info->hmc_info, i);
+ break;
+ case IRDMA_SD_TYPE_DIRECT:
+ irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = IRDMA_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static enum irdma_status_code
+irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ enum irdma_status_code ret_code = 0;
+ u32 i, sd_idx;
+ struct irdma_dma_mem *mem;
+
+ if (!reset)
+ ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->del_sd_cnt, false);
+
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
+ for (i = 0; i < info->del_sd_cnt; i++) {
+ sd_idx = info->hmc_info->sd_indexes[i];
+ sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+ mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ &sd_entry->u.pd_table.pd_page_addr :
+ &sd_entry->u.bp.addr;
+
+ if (!mem || !mem->va) {
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
+ } else {
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_del_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate memory allocated previously for
+ * book-keeping information about PDs and backing storage.
+ */
+enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 i, j;
+ enum irdma_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->count, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;
+
+ if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
+ continue;
+
+ if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ continue;
+
+ rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
+ pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry &&
+ pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: remove_pd_bp error\n");
+ return ret_code;
+ }
+ }
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
+ return IRDMA_ERR_INVALID_SD_INDEX;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case IRDMA_SD_TYPE_DIRECT:
+ ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] =
+ (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ case IRDMA_SD_TYPE_PAGED:
+ ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
+ if (ret_code)
+ break;
+ if (dev->hmc_info != info->hmc_info &&
+ info->rsrc_type == IRDMA_HMC_IW_PBLE &&
+ pd_table->pd_entry) {
+ kfree(pd_table->pd_entry_virt_mem.va);
+ pd_table->pd_entry = NULL;
+ }
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ break;
+ default:
+ break;
+ }
+ }
+ return irdma_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * irdma_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info,
+ u32 sd_index,
+ enum irdma_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_dma_mem dma_mem;
+ u64 alloc_len;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (type == IRDMA_SD_TYPE_PAGED)
+ alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
+ else
+ alloc_len = direct_mode_sz;
+
+ /* allocate a 4K pd page or 2M backing page */
+ dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
+ &dma_mem.pa, GFP_KERNEL);
+ if (!dma_mem.va)
+ return IRDMA_ERR_NO_MEMORY;
+ if (type == IRDMA_SD_TYPE_PAGED) {
+ struct irdma_virt_mem *vmem =
+ &sd_entry->u.pd_table.pd_entry_virt_mem;
+
+ vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va) {
+ dma_free_coherent(hw->device, dma_mem.size,
+ dma_mem.va, dma_mem.pa);
+ dma_mem.va = NULL;
+ return IRDMA_ERR_NO_MEMORY;
+ }
+ sd_entry->u.pd_table.pd_entry = vmem->va;
+
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
+ sizeof(sd_entry->u.pd_table.pd_page_addr));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &dma_mem,
+ sizeof(sd_entry->u.bp.addr));
+
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+ hmc_info->sd_table.use_cnt++;
+ }
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
+ sd_entry->u.bp.use_cnt++;
+
+ return 0;
+}
+
+/**
+ * irdma_add_pd_table_entry - Adds page descriptor to the specified table
+ * @dev: pointer to our device structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the irdma_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ */
+enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_dma_mem mem;
+ struct irdma_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+ return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
+ if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ return 0;
+
+ rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
+ IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ page->va = dma_alloc_coherent(dev->hw->device,
+ page->size, &page->pa,
+ GFP_KERNEL);
+ if (!page->va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ pd_entry->rsrc_pg = false;
+ }
+
+ memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
+ page_desc = page->pa | 0x1;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ pd_table->use_cnt++;
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+ }
+ pd_entry->bp.use_cnt++;
+
+ return 0;
+}
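+
+/*
+ * Worked example (illustrative, hypothetical index): with
+ * IRDMA_HMC_PD_CNT_IN_SD = 512, pd_index 1500 maps to
+ * sd_idx = 1500 / 512 = 2 and rel_pd_idx = 1500 % 512 = 476;
+ * irdma_remove_pd_bp() below inverts the same mapping when a backing
+ * page is torn down.
+ */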
+
+/**
+ * irdma_remove_pd_bp - remove a backing page from a page descriptor
+ * @dev: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ */
+enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ struct irdma_dma_mem *mem;
+ u64 *pd_addr;
+
+ sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt)
+ return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
+ return IRDMA_ERR_INVALID_SD_TYPE;
+
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (--pd_entry->bp.use_cnt)
+ return 0;
+
+ pd_entry->valid = false;
+ pd_table->use_cnt--;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+
+ if (!pd_entry->rsrc_pg) {
+ mem = &pd_entry->bp.addr;
+ if (!mem || !mem->va)
+ return IRDMA_ERR_PARAM;
+
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ if (!pd_table->use_cnt)
+ kfree(pd_table->pd_entry_virt_mem.va);
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (--sd_entry->u.bp.use_cnt)
+ return IRDMA_ERR_NOT_READY;
+
+ hmc_info->sd_table.use_cnt--;
+ sd_entry->valid = false;
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_pd_page - Prepares to remove a PD page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+enum irdma_status_code
+irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.use_cnt)
+ return IRDMA_ERR_NOT_READY;
+
+ sd_entry->valid = false;
+ hmc_info->sd_table.use_cnt--;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
new file mode 100644
index 000000000000..e2139c788b1b
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_HMC_H
+#define IRDMA_HMC_H
+
+#include "defs.h"
+
+#define IRDMA_HMC_MAX_BP_COUNT 512
+#define IRDMA_MAX_SD_ENTRIES 11
+#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK 0xca
+#define IRDMA_HMC_INFO_SIGNATURE 0x484d5347
+#define IRDMA_HMC_PD_CNT_IN_SD 512
+#define IRDMA_HMC_DIRECT_BP_SIZE 0x200000
+#define IRDMA_HMC_MAX_SD_COUNT 8192
+#define IRDMA_HMC_PAGED_BP_SIZE 4096
+#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define IRDMA_FIRST_VF_FPM_ID 8
+#define FPM_MULTIPLIER 1024
+
+enum irdma_hmc_rsrc_type {
+ IRDMA_HMC_IW_QP = 0,
+ IRDMA_HMC_IW_CQ = 1,
+ IRDMA_HMC_IW_RESERVED = 2,
+ IRDMA_HMC_IW_HTE = 3,
+ IRDMA_HMC_IW_ARP = 4,
+ IRDMA_HMC_IW_APBVT_ENTRY = 5,
+ IRDMA_HMC_IW_MR = 6,
+ IRDMA_HMC_IW_XF = 7,
+ IRDMA_HMC_IW_XFFL = 8,
+ IRDMA_HMC_IW_Q1 = 9,
+ IRDMA_HMC_IW_Q1FL = 10,
+ IRDMA_HMC_IW_TIMER = 11,
+ IRDMA_HMC_IW_FSIMC = 12,
+ IRDMA_HMC_IW_FSIAV = 13,
+ IRDMA_HMC_IW_PBLE = 14,
+ IRDMA_HMC_IW_RRF = 15,
+ IRDMA_HMC_IW_RRFFL = 16,
+ IRDMA_HMC_IW_HDR = 17,
+ IRDMA_HMC_IW_MD = 18,
+ IRDMA_HMC_IW_OOISC = 19,
+ IRDMA_HMC_IW_OOISCFFL = 20,
+ IRDMA_HMC_IW_MAX, /* Must be last entry */
+};
+
+enum irdma_sd_entry_type {
+ IRDMA_SD_TYPE_INVALID = 0,
+ IRDMA_SD_TYPE_PAGED = 1,
+ IRDMA_SD_TYPE_DIRECT = 2,
+};
+
+struct irdma_hmc_obj_info {
+ u64 base;
+ u32 max_cnt;
+ u32 cnt;
+ u64 size;
+};
+
+struct irdma_hmc_bp {
+ enum irdma_sd_entry_type entry_type;
+ struct irdma_dma_mem addr;
+ u32 sd_pd_index;
+ u32 use_cnt;
+};
+
+struct irdma_hmc_pd_entry {
+ struct irdma_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg:1;
+ bool valid:1;
+};
+
+struct irdma_hmc_pd_table {
+ struct irdma_dma_mem pd_page_addr;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_virt_mem pd_entry_virt_mem;
+ u32 use_cnt;
+ u32 sd_index;
+};
+
+struct irdma_hmc_sd_entry {
+ enum irdma_sd_entry_type entry_type;
+ bool valid;
+ union {
+ struct irdma_hmc_pd_table pd_table;
+ struct irdma_hmc_bp bp;
+ } u;
+};
+
+struct irdma_hmc_sd_table {
+ struct irdma_virt_mem addr;
+ u32 sd_cnt;
+ u32 use_cnt;
+ struct irdma_hmc_sd_entry *sd_entry;
+};
+
+struct irdma_hmc_info {
+ u32 signature;
+ u8 hmc_fn_id;
+ u16 first_sd_index;
+ struct irdma_hmc_obj_info *hmc_obj;
+ struct irdma_virt_mem hmc_obj_virt_mem;
+ struct irdma_hmc_sd_table sd_table;
+ u16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];
+};
+
+struct irdma_update_sd_entry {
+ u64 cmd;
+ u64 data;
+};
+
+struct irdma_update_sds_info {
+ u32 cnt;
+ u8 hmc_fn_id;
+ struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
+};
+
+struct irdma_ccq_cqe_info;
+struct irdma_hmc_fcn_info {
+ u32 vf_id;
+ u8 free_fcn;
+};
+
+struct irdma_hmc_create_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem add_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 add_sd_cnt;
+ enum irdma_sd_entry_type entry_type;
+ bool privileged;
+};
+
+struct irdma_hmc_del_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem del_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 del_sd_cnt;
+ bool privileged;
+};
+
+enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
+ struct irdma_dma_mem *src_mem,
+ u64 src_offset, u64 size);
+enum irdma_status_code
+irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info);
+enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info,
+ bool reset);
+enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
+ u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type,
+ bool setsd);
+enum irdma_status_code
+irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info,
+ u32 sd_index,
+ enum irdma_sd_entry_type type,
+ u64 direct_mode_sz);
+enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg);
+enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ u32 idx);
+enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
+ u32 idx);
+enum irdma_status_code
+irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
+#endif /* IRDMA_HMC_H */
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
new file mode 100644
index 000000000000..7afb8a6a0526
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -0,0 +1,2725 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+static struct irdma_rsrc_limits rsrc_limits_table[] = {
+ [0] = {
+ .qplimit = SZ_128,
+ },
+ [1] = {
+ .qplimit = SZ_1K,
+ },
+ [2] = {
+ .qplimit = SZ_2K,
+ },
+ [3] = {
+ .qplimit = SZ_4K,
+ },
+ [4] = {
+ .qplimit = SZ_16K,
+ },
+ [5] = {
+ .qplimit = SZ_64K,
+ },
+ [6] = {
+ .qplimit = SZ_128K,
+ },
+ [7] = {
+ .qplimit = SZ_256K,
+ },
+};
+
+/* types of hmc objects */
+static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
+ IRDMA_HMC_IW_QP,
+ IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_HTE,
+ IRDMA_HMC_IW_ARP,
+ IRDMA_HMC_IW_APBVT_ENTRY,
+ IRDMA_HMC_IW_MR,
+ IRDMA_HMC_IW_XF,
+ IRDMA_HMC_IW_XFFL,
+ IRDMA_HMC_IW_Q1,
+ IRDMA_HMC_IW_Q1FL,
+ IRDMA_HMC_IW_TIMER,
+ IRDMA_HMC_IW_FSIMC,
+ IRDMA_HMC_IW_FSIAV,
+ IRDMA_HMC_IW_RRF,
+ IRDMA_HMC_IW_RRFFL,
+ IRDMA_HMC_IW_HDR,
+ IRDMA_HMC_IW_MD,
+ IRDMA_HMC_IW_OOISC,
+ IRDMA_HMC_IW_OOISCFFL,
+};
+
+/**
+ * irdma_iwarp_ce_handler - handle iwarp completions
+ * @iwcq: iwarp cq receiving event
+ */
+static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
+{
+ struct irdma_cq *cq = iwcq->back_cq;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+/**
+ * irdma_puda_ce_handler - handle puda completion events
+ * @rf: RDMA PCI function
+ * @cq: puda completion q for event
+ */
+static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
+ struct irdma_sc_cq *cq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ enum irdma_status_code status;
+ u32 compl_error;
+
+ do {
+ status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
+ if (status == IRDMA_ERR_Q_EMPTY)
+ break;
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
+ break;
+ }
+ if (compl_error) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
+ compl_error);
+ break;
+ }
+ } while (1);
+
+ irdma_sc_ccq_arm(cq);
+}
+
+/**
+ * irdma_process_ceq - handle ceq for completions
+ * @rf: RDMA PCI function
+ * @ceq: ceq having cq for completion
+ */
+static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_sc_ceq *sc_ceq;
+ struct irdma_sc_cq *cq;
+ unsigned long flags;
+
+ sc_ceq = &ceq->sc_ceq;
+ do {
+ spin_lock_irqsave(&ceq->ce_lock, flags);
+ cq = irdma_sc_process_ceq(dev, sc_ceq);
+ if (!cq) {
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+ break;
+ }
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
+ irdma_iwarp_ce_handler(cq);
+
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
+ queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
+ else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
+ cq->cq_type == IRDMA_CQ_TYPE_IEQ)
+ irdma_puda_ce_handler(rf, cq);
+ } while (1);
+}
+
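+/**
+ * irdma_set_flush_fields - set flush and event fields of qp from ae
+ * @qp: qp on which the asynchronous event occurred
+ * @info: aeq entry describing the event
+ */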
+static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ fallthrough;
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ qp->flush_code = FLUSH_PROT_ERR;
+ break;
+ case IRDMA_AE_AMP_BAD_QP:
+ qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ break;
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_ACCESS_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ qp->flush_code = FLUSH_LOC_LEN_ERR;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ qp->flush_code = FLUSH_FATAL_ERR;
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ break;
+ default:
+ qp->flush_code = FLUSH_FATAL_ERR;
+ break;
+ }
+}
+
+/**
+ * irdma_process_aeq - handle aeq events
+ * @rf: RDMA PCI function
+ */
+static void irdma_process_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
+ struct irdma_aeqe_info aeinfo;
+ struct irdma_aeqe_info *info = &aeinfo;
+ int ret;
+ struct irdma_qp *iwqp = NULL;
+ struct irdma_sc_cq *cq = NULL;
+ struct irdma_cq *iwcq = NULL;
+ struct irdma_sc_qp *qp = NULL;
+ struct irdma_qp_host_ctx_info *ctx_info = NULL;
+ struct irdma_device *iwdev = rf->iwdev;
+ unsigned long flags;
+
+ u32 aeqcnt = 0;
+
+ if (!sc_aeq->size)
+ return;
+
+ do {
+ memset(info, 0, sizeof(*info));
+ ret = irdma_sc_get_next_aeqe(sc_aeq, info);
+ if (ret)
+ break;
+
+ aeqcnt++;
+ ibdev_dbg(&iwdev->ibdev,
+ "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
+ info->iwarp_state, info->ae_src);
+
+ if (info->qp) {
+ spin_lock_irqsave(&rf->qptable_lock, flags);
+ iwqp = rf->qp_table[info->qp_cq_id];
+ if (!iwqp) {
+ spin_unlock_irqrestore(&rf->qptable_lock,
+ flags);
+ if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
+ atomic_dec(&iwdev->vsi.qp_suspend_reqs);
+ wake_up(&iwdev->suspend_wq);
+ continue;
+ }
+ ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
+ info->qp_cq_id);
+ continue;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&rf->qptable_lock, flags);
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = info->tcp_state;
+ iwqp->hw_iwarp_state = info->iwarp_state;
+ if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
+ iwqp->last_aeq = info->ae_id;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ ctx_info = &iwqp->ctx_info;
+ if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
+ ctx_info->roce_info->err_rq_idx_valid = true;
+ else
+ ctx_info->iwarp_info->err_rq_idx_valid = true;
+ } else {
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
+ continue;
+ }
+
+ switch (info->ae_id) {
+ struct irdma_cm_node *cm_node;
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ cm_node = iwqp->cm_node;
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ iwqp->rts_ae_rcvd = 1;
+ wake_up_interruptible(&iwqp->waitq);
+ break;
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ if (qp->term_flags)
+ break;
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
+ if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
+ iwqp->ibqp_state == IB_QPS_RTS) {
+ irdma_next_iw_state(iwqp,
+ IRDMA_QP_STATE_CLOSING,
+ 0, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ }
+ break;
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ if (qp->term_flags)
+ irdma_terminate_done(qp, 0);
+ else
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RESET_SENT:
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
+ 0);
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ if (atomic_read(&iwqp->close_timer_started))
+ break;
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+ atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+ case IRDMA_AE_TERMINATE_SENT:
+ irdma_terminate_send_fin(qp);
+ break;
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ irdma_terminate_received(qp, info);
+ break;
+ case IRDMA_AE_CQ_OPERATION_ERROR:
+ ibdev_err(&iwdev->ibdev,
+ "Processing an iWARP related AE for CQ misc = 0x%04X\n",
+ info->ae_id);
+ cq = (struct irdma_sc_cq *)(unsigned long)
+ info->compl_ctx;
+
+ iwcq = cq->back_cq;
+
+ if (iwcq->ibcq.event_handler) {
+ struct ib_event ibevent;
+
+ ibevent.device = iwcq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &iwcq->ibcq;
+ iwcq->ibcq.event_handler(&ibevent,
+ iwcq->ibcq.cq_context);
+ }
+ break;
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_RESOURCE_EXHAUSTION:
+ break;
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ ctx_info->roce_info->err_rq_idx_valid = false;
+ else
+ ctx_info->iwarp_info->err_rq_idx_valid = false;
+ fallthrough;
+ default:
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
+ info->ae_id, info->qp, info->qp_cq_id);
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
+ ctx_info->roce_info->err_rq_idx = info->wqe_idx;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ irdma_set_flush_fields(qp, info);
+ irdma_cm_disconn(iwqp);
+ break;
+ }
+ if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
+ ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
+ iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
+ irdma_cm_disconn(iwqp);
+ } else {
+ irdma_terminate_connection(qp, info);
+ }
+ break;
+ }
+ if (info->qp)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ } while (1);
+
+ if (aeqcnt)
+ irdma_sc_repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * irdma_ena_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
+{
+ dev->irq_ops->irdma_en_irq(dev, msix_id);
+}
+
+/**
+ * irdma_dpc - tasklet for aeq and ceq 0
+ * @t: tasklet_struct ptr
+ */
+static void irdma_dpc(struct tasklet_struct *t)
+{
+ struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
+
+ if (rf->msix_shared)
+ irdma_process_ceq(rf, rf->ceqlist);
+ irdma_process_aeq(rf);
+ irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
+}
+
+/**
+ * irdma_ceq_dpc - dpc handler for CEQ
+ * @t: tasklet_struct ptr
+ */
+static void irdma_ceq_dpc(struct tasklet_struct *t)
+{
+ struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
+ struct irdma_pci_f *rf = iwceq->rf;
+
+ irdma_process_ceq(rf, iwceq);
+ irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * irdma_save_msix_info - copy msix vector information to iwarp device
+ * @rf: RDMA PCI function
+ *
+ * Allocate iwdev msix table and copy the msix info to the table.
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf)
+{
+ struct irdma_qvlist_info *iw_qvlist;
+ struct irdma_qv_info *iw_qvinfo;
+ struct msix_entry *pmsix;
+ u32 ceq_idx;
+ u32 i;
+ size_t size;
+
+ if (!rf->msix_count)
+ return IRDMA_ERR_NO_INTR;
+
+ size = sizeof(struct irdma_msix_vector) * rf->msix_count;
+ size += struct_size(iw_qvlist, qv_info, rf->msix_count);
+ rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+ if (!rf->iw_msixtbl)
+ return IRDMA_ERR_NO_MEMORY;
+
+ rf->iw_qvlist = (struct irdma_qvlist_info *)
+ (&rf->iw_msixtbl[rf->msix_count]);
+ iw_qvlist = rf->iw_qvlist;
+ iw_qvinfo = iw_qvlist->qv_info;
+ iw_qvlist->num_vectors = rf->msix_count;
+ if (rf->msix_count <= num_online_cpus())
+ rf->msix_shared = true;
+
+ pmsix = rf->msix_entries;
+ for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
+ rf->iw_msixtbl[i].idx = pmsix->entry;
+ rf->iw_msixtbl[i].irq = pmsix->vector;
+ rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
+ if (!i) {
+ iw_qvinfo->aeq_idx = 0;
+ if (rf->msix_shared)
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ else
+ iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
+ } else {
+ iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ }
+ iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
+ pmsix++;
+ }
+
+ return 0;
+}
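+
+/*
+ * Illustrative layout (example only): with msix_count = 4 and
+ * msix_shared == true, vector 0 services both the AEQ and CEQ 0 while
+ * vectors 1-3 service CEQs 1-3; with msix_shared == false, vector 0 is
+ * AEQ-only and CEQs 0-2 move to vectors 1-3.
+ */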
+
+/**
+ * irdma_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: RDMA PCI function
+ */
+static irqreturn_t irdma_irq_handler(int irq, void *data)
+{
+ struct irdma_pci_f *rf = data;
+
+ tasklet_schedule(&rf->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t irdma_ceq_handler(int irq, void *data)
+{
+ struct irdma_ceq *iwceq = data;
+
+ if (iwceq->irq != irq)
+ ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
+ iwceq->irq, irq);
+ tasklet_schedule(&iwceq->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_destroy_irq - destroy device interrupts
+ * @rf: RDMA PCI function
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ struct irdma_msix_vector *msix_vec, void *dev_id)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ irq_set_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * irdma_destroy_cqp - destroy control qp
+ * @rf: RDMA PCI function
+ * @free_hwcqp: true if hw cqp should be freed
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
+{
+ enum irdma_status_code status = 0;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+
+ if (rf->cqp_cmpl_wq)
+ destroy_workqueue(rf->cqp_cmpl_wq);
+ if (free_hwcqp)
+ status = irdma_sc_cqp_destroy(dev->cqp);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+
+ irdma_cleanup_pending_cqp_op(rf);
+ dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
+ cqp->sq.pa);
+ cqp->sq.va = NULL;
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
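+/**
+ * irdma_destroy_virt_aeq - free the resources of a virtually mapped aeq
+ * @rf: RDMA PCI function
+ */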
+static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_aeq *aeq = &rf->aeq;
+ u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+
+ irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+}
+
+/**
+ * irdma_destroy_aeq - destroy aeq
+ * @rf: RDMA PCI function
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq.
+ * The function is called during driver unload
+ */
+static void irdma_destroy_aeq(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status = IRDMA_ERR_NOT_READY;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+
+ if (!rf->msix_shared) {
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
+ irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
+ }
+ if (rf->reset)
+ goto exit;
+
+ aeq->sc_aeq.size = 0;
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
+
+exit:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_destroy_ceq - destroy ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq to be destroyed
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
+{
+ enum irdma_status_code status;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ if (rf->reset)
+ goto exit;
+
+ status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
+ status);
+exit:
+ dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
+ iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+}
+
+/**
+ * irdma_del_ceq_0 - destroy ceq 0
+ * @rf: RDMA PCI function
+ *
+ * Disable the ceq 0 interrupt and destroy the ceq 0
+ */
+static void irdma_del_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = rf->ceqlist;
+ struct irdma_msix_vector *msix_vec;
+
+ if (rf->msix_shared) {
+ msix_vec = &rf->iw_msixtbl[0];
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, rf);
+ } else {
+ msix_vec = &rf->iw_msixtbl[1];
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ }
+
+ irdma_destroy_ceq(rf, iwceq);
+ rf->sc_dev.ceq_valid = false;
+ rf->ceqs_count = 0;
+}
+
+/**
+ * irdma_del_ceqs - destroy all ceqs except CEQ 0
+ * @rf: RDMA PCI function
+ *
+ * Go through all of the device ceqs, except 0, and for each
+ * one disable the ceq interrupt and destroy the ceq
+ */
+static void irdma_del_ceqs(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = &rf->ceqlist[1];
+ struct irdma_msix_vector *msix_vec;
+ u32 i = 0;
+
+ if (rf->msix_shared)
+ msix_vec = &rf->iw_msixtbl[1];
+ else
+ msix_vec = &rf->iw_msixtbl[2];
+
+ for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_DESTROY);
+ dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+ rf->ceqs_count = 1;
+}
+
+/**
+ * irdma_destroy_ccq - destroy control cq
+ * @rf: RDMA PCI function
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_ccq *ccq = &rf->ccq;
+ enum irdma_status_code status = 0;
+
+ if (!rf->reset)
+ status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
+ ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+}
+
+/**
+ * irdma_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: iwarp device
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory info struct
+ * @privileged: permission to close HMC objects
+ * @reset: true if called before reset
+ */
+static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
+ enum irdma_hmc_rsrc_type obj_type,
+ struct irdma_hmc_info *hmc_info,
+ bool privileged, bool reset)
+{
+ struct irdma_hmc_del_obj_info info = {};
+
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.privileged = privileged;
+ if (irdma_sc_del_hmc_obj(dev, &info, reset))
+ ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
+ obj_type);
+}
+
+/**
+ * irdma_del_hmc_objects - remove all device hmc objects
+ * @dev: iwarp device
+ * @hmc_info: hmc_info to free
+ * @privileged: permission to delete HMC objects
+ * @reset: true if called before reset
+ * @vers: hardware version
+ */
+static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, bool privileged,
+ bool reset, enum irdma_vers vers)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ hmc_info, privileged, reset);
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+}
+
+/**
+ * irdma_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static enum irdma_status_code
+irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ return irdma_sc_create_hmc_obj(dev, info);
+}
+
+/**
+ * irdma_create_hmc_objs - create all hmc objects for the device
+ * @rf: RDMA PCI function
+ * @privileged: permission to create HMC objects
+ * @vers: HW version
+ *
+ * Create the device hmc objects and allocate hmc pages.
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static enum irdma_status_code
+irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_hmc_create_obj_info info = {};
+ enum irdma_status_code status = 0;
+ int i;
+
+ info.hmc_info = dev->hmc_info;
+ info.privileged = privileged;
+ info.entry_type = rf->sd_type;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
+ info.rsrc_type = iw_hmc_obj_types[i];
+ info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
+ status = irdma_create_hmc_obj_type(dev, &info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: create obj type %d status = %d\n",
+ iw_hmc_obj_types[i], status);
+ break;
+ }
+ }
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+
+ if (!status)
+ return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
+ true, true);
+
+ while (i) {
+ i--;
+ /* destroy the hmc objects of a given type */
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ dev->hmc_info, privileged,
+ false);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_obj_aligned_mem - get aligned memory from device allocated memory
+ * @rf: RDMA PCI function
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory.
+ * Return 0 if successful, otherwise return no memory error
+ */
+static enum irdma_status_code
+irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,
+ u32 size, u32 mask)
+{
+ unsigned long va, newva;
+ unsigned long extra;
+
+ va = (unsigned long)rf->obj_next.va;
+ newva = va;
+ if (mask)
+ newva = ALIGN(va, (unsigned long)mask + 1ULL);
+ extra = newva - va;
+ memptr->va = (u8 *)va + extra;
+ memptr->pa = rf->obj_next.pa + extra;
+ memptr->size = size;
+ if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
+ return IRDMA_ERR_NO_MEMORY;
+
+ rf->obj_next.va = (u8 *)memptr->va + size;
+ rf->obj_next.pa = memptr->pa + size;
+
+ return 0;
+}
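+
+/*
+ * Worked example (illustrative, hypothetical addresses): with obj_next.va
+ * at 0x1003 and mask = IRDMA_SHADOWAREA_M (127),
+ * newva = ALIGN(0x1003, 128) = 0x1080 and extra = 0x7d, so the caller
+ * receives 128-byte-aligned memory and obj_next advances by extra + size.
+ */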
+
+/**
+ * irdma_create_cqp - create control qp
+ * @rf: RDMA PCI function
+ *
+ * Return 0 if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status;
+ u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
+ struct irdma_dma_mem mem;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp_init_info cqp_init_info = {};
+ struct irdma_cqp *cqp = &rf->cqp;
+ u16 maj_err, min_err;
+ int i;
+
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+ if (!cqp->cqp_requests)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+ kfree(cqp->cqp_requests);
+ return IRDMA_ERR_NO_MEMORY;
+ }
+
+ dev->cqp = &cqp->sc_cqp;
+ dev->cqp->dev = dev;
+ cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
+ IRDMA_CQP_ALIGNMENT);
+ cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
+ &cqp->sq.pa, GFP_KERNEL);
+ if (!cqp->sq.va) {
+ kfree(cqp->scratch_array);
+ kfree(cqp->cqp_requests);
+ return IRDMA_ERR_NO_MEMORY;
+ }
+
+ status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
+ IRDMA_HOST_CTX_ALIGNMENT_M);
+ if (status)
+ goto exit;
+
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+ /* populate the cqp init info */
+ cqp_init_info.dev = dev;
+ cqp_init_info.sq_size = sqsize;
+ cqp_init_info.sq = cqp->sq.va;
+ cqp_init_info.sq_pa = cqp->sq.pa;
+ cqp_init_info.host_ctx_pa = mem.pa;
+ cqp_init_info.host_ctx = mem.va;
+ cqp_init_info.hmc_profile = rf->rsrc_profile;
+ cqp_init_info.scratch_array = cqp->scratch_array;
+ cqp_init_info.protocol_used = rf->protocol_used;
+
+ switch (rf->rdma_ver) {
+ case IRDMA_GEN_1:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
+ break;
+ case IRDMA_GEN_2:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
+ break;
+ }
+ status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
+ goto exit;
+ }
+
+ spin_lock_init(&cqp->req_lock);
+ spin_lock_init(&cqp->compl_lock);
+
+ status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+ INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+
+ /* init the waitqueue of the cqp_requests and add them to the list */
+ for (i = 0; i < sqsize; i++) {
+ init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+ list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+ }
+ init_waitqueue_head(&cqp->remove_wq);
+ return 0;
+
+exit:
+ irdma_destroy_cqp(rf, false);
+
+ return status;
+}
+
+/**
+ * irdma_create_ccq - create control cq
+ * @rf: RDMA PCI function
+ *
+ * Return 0 if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ enum irdma_status_code status;
+ struct irdma_ccq_init_info info = {};
+ struct irdma_ccq *ccq = &rf->ccq;
+
+ dev->ccq = &ccq->sc_cq;
+ dev->ccq->dev = dev;
+ info.dev = dev;
+ ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+ IRDMA_CQ0_ALIGNMENT);
+ ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
+ &ccq->mem_cq.pa, GFP_KERNEL);
+ if (!ccq->mem_cq.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
+ ccq->shadow_area.size,
+ IRDMA_SHADOWAREA_M);
+ if (status)
+ goto exit;
+
+ ccq->sc_cq.back_cq = ccq;
+ /* populate the ccq init info */
+ info.cq_base = ccq->mem_cq.va;
+ info.cq_pa = ccq->mem_cq.pa;
+ info.num_elem = IW_CCQ_SIZE;
+ info.shadow_area = ccq->shadow_area.va;
+ info.shadow_area_pa = ccq->shadow_area.pa;
+ info.ceqe_mask = false;
+ info.ceq_id_valid = true;
+ info.shadow_read_threshold = 16;
+ info.vsi = &rf->default_vsi;
+ status = irdma_sc_ccq_init(dev->ccq, &info);
+ if (!status)
+ status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
+exit:
+ if (status) {
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
+ ccq->mem_cq.va, ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_alloc_set_mac - set up a mac address table entry
+ * @iwdev: irdma device
+ *
+ * Allocate a mac ip entry and add it to the hw table. Return 0
+ * if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
+{
+ enum irdma_status_code status;
+
+ status = irdma_alloc_local_mac_entry(iwdev->rf,
+ &iwdev->mac_ip_table_idx);
+ if (!status) {
+ status = irdma_add_local_mac_entry(iwdev->rf,
+ (u8 *)iwdev->netdev->dev_addr,
+ (u8)iwdev->mac_ip_table_idx);
+ if (status)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ }
+ return status;
+}
+
+/**
+ * irdma_cfg_ceq_vector - set up the msix interrupt vector for
+ * ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ * @msix_vec: interrupt vector information
+ *
+ * Allocate interrupt resources and enable irq handling.
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code
+irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_msix_vector *msix_vec)
+{
+ int status;
+
+ if (rf->msix_shared && !ceq_id) {
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ "AEQCEQ", rf);
+ } else {
+ tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
+
+ status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
+ "CEQ", iwceq);
+ }
+ cpumask_clear(&msix_vec->mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+ irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
+ return IRDMA_ERR_CFG;
+ }
+
+ msix_vec->ceq_id = ceq_id;
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
+
+ return 0;
+}
+
+/**
+ * irdma_cfg_aeq_vector - set up the msix vector for aeq
+ * @rf: RDMA PCI function
+ *
+ * Allocate interrupt resources and enable irq handling.
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
+{
+ struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
+ u32 ret = 0;
+
+ if (!rf->msix_shared) {
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ "irdma", rf);
+ }
+ if (ret) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
+ return IRDMA_ERR_CFG;
+ }
+
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
+
+ return 0;
+}
+
+/**
+ * irdma_create_ceq - create completion event queue
+ * @rf: RDMA PCI function
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ * @vsi: SC vsi struct
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
+ struct irdma_ceq *iwceq,
+ u32 ceq_id,
+ struct irdma_sc_vsi *vsi)
+{
+ enum irdma_status_code status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u64 scratch;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+ iwceq->rf = rf;
+ ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ dev->hw_attrs.max_hw_ceq_size);
+ iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
+ IRDMA_CEQ_ALIGNMENT);
+ iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
+ &iwceq->mem.pa, GFP_KERNEL);
+ if (!iwceq->mem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ info.ceqe_base = iwceq->mem.va;
+ info.ceqe_pa = iwceq->mem.pa;
+ info.elem_cnt = ceq_size;
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi = vsi;
+ scratch = (uintptr_t)&rf->cqp.sc_cqp;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+ status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
+ }
+
+ if (status) {
+ dma_free_coherent(dev->hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
+ * @rf: RDMA PCI function
+ *
+ * Allocate a list for all device completion event queues
+ * Create ceq 0 and configure its msix interrupt vector
+ * Return 0, if successfully set up, otherwise return error
+ */
+static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ u32 i;
+ enum irdma_status_code status = 0;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
+ rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
+ if (!rf->ceqlist) {
+ status = IRDMA_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ iwceq = &rf->ceqlist[0];
+ status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
+ status);
+ goto exit;
+ }
+
+ spin_lock_init(&iwceq->ce_lock);
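+ /* Vector 0 is shared with the AEQ when msix_shared; otherwise the
+ * AEQ owns vector 0 and CEQ 0 takes vector 1.
+ */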
+ i = rf->msix_shared ? 0 : 1;
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto exit;
+ }
+
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+
+exit:
+ if (status && !rf->ceqs_count) {
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ return status;
+ }
+ rf->sc_dev.ceq_valid = true;
+
+ return 0;
+}
+
+/**
+ * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
+ * @rf: RDMA PCI function
+ * @vsi: VSI structure for this CEQ
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceqs and configure their msix interrupt vectors
+ * Return 0, if ceqs are successfully set up, otherwise return error
+ */
+static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf,
+ struct irdma_sc_vsi *vsi)
+{
+ u32 i;
+ u32 ceq_id;
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ enum irdma_status_code status;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
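+ /* CEQ 0 and its vector were already set up by irdma_setup_ceq_0();
+ * continue from ceq_id 1 and the next free MSI-X entry.
+ */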
+ i = (rf->msix_shared) ? 1 : 2;
+ for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
+ iwceq = &rf->ceqlist[ceq_id];
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: create ceq status = %d\n", status);
+ goto del_ceqs;
+ }
+ spin_lock_init(&iwceq->ce_lock);
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto del_ceqs;
+ }
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+ }
+
+ return 0;
+
+del_ceqs:
+ irdma_del_ceqs(rf);
+
+ return status;
+}
+
+static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf,
+ u32 size)
+{
+ enum irdma_status_code status = IRDMA_ERR_NO_MEMORY;
+ struct irdma_aeq *aeq = &rf->aeq;
+ dma_addr_t *pg_arr;
+ u32 pg_cnt;
+
+ if (rf->rdma_ver < IRDMA_GEN_2)
+ return IRDMA_NOT_SUPPORTED;
+
+ aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
+ aeq->mem.va = vzalloc(aeq->mem.size);
+
+ if (!aeq->mem.va)
+ return status;
+
+ pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
+ if (status) {
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+ status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
+ if (status) {
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_aeq - create async event queue
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status;
+ struct irdma_aeq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
+ u32 aeq_size;
+ u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
+
+ aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
+
+ aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
+ IRDMA_AEQ_ALIGNMENT);
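+ /* Prefer a single contiguous DMA buffer; __GFP_NOWARN keeps a failed
+ * large allocation quiet since a virtually mapped AEQ is the fallback.
+ */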
+ aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
+ &aeq->mem.pa,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (aeq->mem.va)
+ goto skip_virt_aeq;
+
+ /* physically mapped aeq failed. setup virtual aeq */
+ status = irdma_create_virt_aeq(rf, aeq_size);
+ if (status)
+ return status;
+
+ info.virtual_map = true;
+ aeq->virtual_map = info.virtual_map;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = aeq->palloc.level1.idx;
+
+skip_virt_aeq:
+ info.aeqe_base = aeq->mem.va;
+ info.aeq_elem_pa = aeq->mem.pa;
+ info.elem_cnt = aeq_size;
+ info.dev = dev;
+ info.msix_idx = rf->iw_msixtbl->idx;
+ status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
+ if (status)
+ goto err;
+
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
+ if (status)
+ goto err;
+
+ return 0;
+
+err:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_aeq - set up the device aeq
+ * @rf: RDMA PCI function
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ enum irdma_status_code status;
+
+ status = irdma_create_aeq(rf);
+ if (status)
+ return status;
+
+ status = irdma_cfg_aeq_vector(rf);
+ if (status) {
+ irdma_destroy_aeq(rf);
+ return status;
+ }
+
+ if (!rf->msix_shared)
+ irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
+
+ return 0;
+}
+
+/**
+ * irdma_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ enum irdma_status_code status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
+ info.cq_id = 1;
+ info.qp_id = 1;
+ info.count = 1;
+ info.pd_id = 1;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = 1024;
+ info.tx_buf_cnt = 2 * info.sq_size;
+ info.receive = irdma_receive_ilq;
+ info.xmit_complete = irdma_free_sqbuf;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_initialize_ieq - create iwarp exception queue
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ enum irdma_status_code status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
+ info.cq_id = 2;
+ info.qp_id = iwdev->vsi.exception_lan_q;
+ info.count = 1;
+ info.pd_id = 2;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
+ info.tx_buf_cnt = 4096;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_reinitialize_ieq - destroy and re-create ieq
+ * @vsi: VSI structure
+ */
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
+ if (irdma_initialize_ieq(iwdev)) {
+ iwdev->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+}
+
+/**
+ * irdma_hmc_setup - create hmc objects for the device
+ * @rf: RDMA PCI function
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status;
+ u32 qpcnt;
+
+ if (rf->rdma_ver == IRDMA_GEN_1)
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
+ else
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+
+ rf->sd_type = IRDMA_SD_TYPE_DIRECT;
+ status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
+ if (status)
+ return status;
+
+ status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
+
+ return status;
+}
+
+/**
+ * irdma_del_init_mem - deallocate memory resources
+ * @rf: RDMA PCI function
+ */
+static void irdma_del_init_mem(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ kfree(dev->hmc_info->sd_table.sd_entry);
+ dev->hmc_info->sd_table.sd_entry = NULL;
+ kfree(rf->mem_rsrc);
+ rf->mem_rsrc = NULL;
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ kfree(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+ }
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+}
+
+/**
+ * irdma_initialize_dev - initialize device
+ * @rf: RDMA PCI function
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_device_init_info info = {};
+ struct irdma_dma_mem mem;
+ u32 size;
+
+ size = sizeof(struct irdma_hmc_pble_rsrc) +
+ sizeof(struct irdma_hmc_info) +
+ (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
+
+ rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+ if (!rf->hmc_info_mem)
+ return IRDMA_ERR_NO_MEMORY;
+
+ rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
+ dev->hmc_info = &rf->hw.hmc;
+ dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
+ (rf->pble_rsrc + 1);
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+
+ info.bar0 = rf->hw.hw_addr;
+ info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn);
+ info.hw = &rf->hw;
+ status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+
+ return status;
+}
+
+/**
+ * irdma_rt_deinit_hw - clean up the irdma device resources
+ * @iwdev: irdma device
+ *
+ * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
+ * device queues and free the pble and the hmc objects
+ */
+void irdma_rt_deinit_hw(struct irdma_device *iwdev)
+{
+ ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
+
+ switch (iwdev->init_state) {
+ case IP_ADDR_REGISTERED:
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ fallthrough;
+ case AEQ_CREATED:
+ case PBLE_CHUNK_MEM:
+ case CEQS_CREATED:
+ case IEQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
+ iwdev->reset);
+ fallthrough;
+ case ILQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi,
+ IRDMA_PUDA_RSRC_TYPE_ILQ,
+ iwdev->reset);
+ break;
+ default:
+ ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
+ break;
+ }
+
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ if (iwdev->vsi.pestat) {
+ irdma_vsi_stats_free(&iwdev->vsi);
+ kfree(iwdev->vsi.pestat);
+ }
+ if (iwdev->cleanup_wq)
+ destroy_workqueue(iwdev->cleanup_wq);
+}
+
+static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)
+{
+ enum irdma_status_code status;
+
+ status = irdma_save_msix_info(rf);
+ if (status)
+ return status;
+
+ rf->hw.device = &rf->pcidev->dev;
+ rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
+ rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
+ &rf->obj_mem.pa, GFP_KERNEL);
+ if (!rf->obj_mem.va) {
+ status = IRDMA_ERR_NO_MEMORY;
+ goto clean_msixtbl;
+ }
+
+ rf->obj_next = rf->obj_mem;
+ status = irdma_initialize_dev(rf);
+ if (status)
+ goto clean_obj_mem;
+
+ return 0;
+
+clean_obj_mem:
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+clean_msixtbl:
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ return status;
+}
+
+/**
+ * irdma_get_used_rsrc - determine resources used internally
+ * @iwdev: irdma device
+ *
+ * Called at the end of open to get all internal allocations
+ */
+static void irdma_get_used_rsrc(struct irdma_device *iwdev)
+{
+ iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd, 0);
+ iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp, 0);
+ iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq, 0);
+ iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr, 0);
+}
+
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+{
+ enum init_completion_state state = rf->init_state;
+
+ rf->init_state = INVALID_STATE;
+ if (rf->rsrc_created) {
+ irdma_destroy_aeq(rf);
+ irdma_destroy_pble_prm(rf->pble_rsrc);
+ irdma_del_ceqs(rf);
+ rf->rsrc_created = false;
+ }
+ switch (state) {
+ case CEQ0_CREATED:
+ irdma_del_ceq_0(rf);
+ fallthrough;
+ case CCQ_CREATED:
+ irdma_destroy_ccq(rf);
+ fallthrough;
+ case HW_RSRC_INITIALIZED:
+ case HMC_OBJS_CREATED:
+ irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
+ rf->reset, rf->rdma_ver);
+ fallthrough;
+ case CQP_CREATED:
+ irdma_destroy_cqp(rf, true);
+ fallthrough;
+ case INITIAL_STATE:
+ irdma_del_init_mem(rf);
+ break;
+ case INVALID_STATE:
+ default:
+ ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
+ break;
+ }
+}
+
+/**
+ * irdma_rt_init_hw - Initializes runtime portion of HW
+ * @iwdev: irdma device
+ * @l2params: qos, tc, mtu info from netdev driver
+ *
+ * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma
+ * device resource objects.
+ */
+enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ enum irdma_status_code status;
+ struct irdma_vsi_init_info vsi_info = {};
+ struct irdma_vsi_stats_info stats_info = {};
+
+ vsi_info.dev = dev;
+ vsi_info.back_vsi = iwdev;
+ vsi_info.params = l2params;
+ vsi_info.pf_data_vsi_num = iwdev->vsi_num;
+ vsi_info.register_qset = rf->gen_ops.register_qset;
+ vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
+ vsi_info.exception_lan_q = 2;
+ irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+ status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
+ if (status)
+ return status;
+
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ if (!stats_info.pestat) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ return IRDMA_ERR_NO_MEMORY;
+ }
+ stats_info.fcn_id = dev->hmc_fn_id;
+ status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
+ if (status) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ kfree(stats_info.pestat);
+ return status;
+ }
+
+ do {
+ if (!iwdev->roce_mode) {
+ status = irdma_initialize_ilq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = ILQ_CREATED;
+ status = irdma_initialize_ieq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IEQ_CREATED;
+ }
+ if (!rf->rsrc_created) {
+ status = irdma_setup_ceqs(rf, &iwdev->vsi);
+ if (status)
+ break;
+
+ iwdev->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status) {
+ irdma_del_ceqs(rf);
+ break;
+ }
+
+ iwdev->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status) {
+ irdma_destroy_pble_prm(rf->pble_rsrc);
+ irdma_del_ceqs(rf);
+ break;
+ }
+ iwdev->init_state = AEQ_CREATED;
+ rf->rsrc_created = true;
+ }
+
+ iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_alloc_set_mac(iwdev);
+ irdma_add_ip(iwdev);
+ iwdev->init_state = IP_ADDR_REGISTERED;
+
+ /* handles asynchronous cleanup tasks - disconnect CM, free qp,
+ * free cq bufs
+ */
+ iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!iwdev->cleanup_wq)
+ return IRDMA_ERR_NO_MEMORY;
+ irdma_get_used_rsrc(iwdev);
+ init_waitqueue_head(&iwdev->suspend_wq);
+
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
+ status, iwdev->init_state);
+ irdma_rt_deinit_hw(iwdev);
+
+ return status;
+}
+
+/**
+ * irdma_ctrl_init_hw - Initializes control portion of HW
+ * @rf: RDMA PCI function
+ *
+ * Create admin queues, HMC objects and RF resource objects
+ */
+enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ enum irdma_status_code status;
+ do {
+ status = irdma_setup_init_state(rf);
+ if (status)
+ break;
+ rf->init_state = INITIAL_STATE;
+
+ status = irdma_create_cqp(rf);
+ if (status)
+ break;
+ rf->init_state = CQP_CREATED;
+
+ status = irdma_hmc_setup(rf);
+ if (status)
+ break;
+ rf->init_state = HMC_OBJS_CREATED;
+
+ status = irdma_initialize_hw_rsrc(rf);
+ if (status)
+ break;
+ rf->init_state = HW_RSRC_INITIALIZED;
+
+ status = irdma_create_ccq(rf);
+ if (status)
+ break;
+ rf->init_state = CCQ_CREATED;
+
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
+ status = irdma_setup_ceq_0(rf);
+ if (status)
+ break;
+ rf->init_state = CEQ0_CREATED;
+ /* Handles processing of CQP completions */
+ rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
+ WQ_HIGHPRI | WQ_UNBOUND);
+ if (!rf->cqp_cmpl_wq) {
+ status = IRDMA_ERR_NO_MEMORY;
+ break;
+ }
+ INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
+ irdma_sc_ccq_arm(dev->ccq);
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
+ rf->init_state, status);
+ irdma_ctrl_deinit_hw(rf);
+ return status;
+}
+
+/**
+ * irdma_set_hw_rsrc - set hw memory resources.
+ * @rf: RDMA PCI function
+ */
+static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+{
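+ /* Carve the single mem_rsrc allocation into per-resource bitmaps;
+ * the total consumed here must match irdma_calc_mem_rsrc_size().
+ */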
+ rf->allocated_qps = (void *)(rf->mem_rsrc +
+ (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
+ rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
+ rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
+ rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
+ rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
+ rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
+ rf->qp_table = (struct irdma_qp **)
+ (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
+
+ spin_lock_init(&rf->rsrc_lock);
+ spin_lock_init(&rf->arp_lock);
+ spin_lock_init(&rf->qptable_lock);
+ spin_lock_init(&rf->qh_list_lock);
+
+ return 0;
+}
+
+/**
+ * irdma_calc_mem_rsrc_size - calculate memory resources size.
+ * @rf: RDMA PCI function
+ */
+static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+
+ rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
+ rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
+
+ return rsrc_size;
+}
+
+/**
+ * irdma_initialize_hw_rsrc - initialize hw resource tracking array
+ * @rf: RDMA PCI function
+ */
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+ u32 mrdrvbits;
+ u32 ret;
+
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ rf->allocated_ws_nodes =
+ kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!rf->allocated_ws_nodes)
+ return -ENOMEM;
+
+ set_bit(0, rf->allocated_ws_nodes);
+ rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
+ }
+ rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
+ rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
+ rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
+ rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
+ rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
+ rf->max_mcg = rf->max_qp;
+
+ rsrc_size = irdma_calc_mem_rsrc_size(rf);
+ rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
+ if (!rf->mem_rsrc) {
+ ret = -ENOMEM;
+ goto mem_rsrc_kzalloc_fail;
+ }
+
+ rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
+
+ ret = irdma_set_hw_rsrc(rf);
+ if (ret)
+ goto set_hw_rsrc_fail;
+
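+ /* Reserve index 0 of each resource, plus the QPs/CQs/PDs that back
+ * the ILQ (1) and IEQ (2).
+ */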
+ set_bit(0, rf->allocated_mrs);
+ set_bit(0, rf->allocated_qps);
+ set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_pds);
+ set_bit(0, rf->allocated_arps);
+ set_bit(0, rf->allocated_ahs);
+ set_bit(0, rf->allocated_mcgs);
+ set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
+ set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
+ set_bit(1, rf->allocated_cqs);
+ set_bit(1, rf->allocated_pds);
+ set_bit(2, rf->allocated_cqs);
+ set_bit(2, rf->allocated_pds);
+
+ INIT_LIST_HEAD(&rf->mc_qht_list.list);
+ /* stag index mask has a minimum of 14 bits */
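+ /* e.g. max_mr = 64K: get_count_order() = 16 and mrdrvbits = 8, so
+ * the top 8 bits of the 32-bit STag are reserved for the driver
+ */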
+ mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
+ rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+
+ return 0;
+
+set_hw_rsrc_fail:
+ kfree(rf->mem_rsrc);
+ rf->mem_rsrc = NULL;
+mem_rsrc_kzalloc_fail:
+ kfree(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+
+ return ret;
+}
+
+/**
+ * irdma_cqp_ce_handler - handle cqp completions
+ * @rf: RDMA PCI function
+ * @cq: cq for cqp completions
+ */
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 cqe_count = 0;
+ struct irdma_ccq_cqe_info info;
+ unsigned long flags;
+ int ret;
+
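+ /* Drain every pending CCQ completion: wake synchronous waiters or
+ * run async callbacks, then re-arm the CCQ once it is empty.
+ */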
+ do {
+ memset(&info, 0, sizeof(info));
+ spin_lock_irqsave(&rf->cqp.compl_lock, flags);
+ ret = irdma_sc_ccq_get_cqe_info(cq, &info);
+ spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
+ if (ret)
+ break;
+
+ cqp_request = (struct irdma_cqp_request *)
+ (unsigned long)info.scratch;
+ if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
+ info.maj_err_code,
+ info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code, info.min_err_code);
+ if (cqp_request) {
+ cqp_request->compl_info.maj_err_code = info.maj_err_code;
+ cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.op_ret_val = info.op_ret_val;
+ cqp_request->compl_info.error = info.error;
+
+ if (cqp_request->waiting) {
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ } else {
+ if (cqp_request->callback_fcn)
+ cqp_request->callback_fcn(cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ }
+ }
+
+ cqe_count++;
+ } while (1);
+
+ if (cqe_count) {
+ irdma_process_bh(dev);
+ irdma_sc_ccq_arm(cq);
+ }
+}
+
+/**
+ * cqp_compl_worker - Handle cqp completions
+ * @work: Pointer to work structure
+ */
+void cqp_compl_worker(struct work_struct *work)
+{
+ struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
+ cqp_cmpl_work);
+ struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
+
+ irdma_cqp_ce_handler(rf, cq);
+}
+
+/**
+ * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
+ * @cm_core: cm's core
+ * @port: port to identify apbvt entry
+ */
+static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
+ u16 port)
+{
+ struct irdma_apbvt_entry *entry;
+
+ hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
+ if (entry->port == port) {
+ entry->use_cnt++;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: flag to remove the quad hash table entry
+ * @term: terminate message flags
+ * @termlen: length of term message
+ */
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 termlen)
+{
+ struct irdma_modify_qp_info info = {};
+
+ info.next_iwarp_state = state;
+ info.remove_hash_idx = del_hash;
+ info.cq_num_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.dont_send_term = true;
+ info.dont_send_fin = true;
+ info.termlen = termlen;
+
+ if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
+ info.dont_send_term = false;
+ if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
+ info.dont_send_fin = false;
+ if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
+ info.reset_tcp_conn = true;
+ iwqp->hw_iwarp_state = state;
+ irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+ iwqp->iwarp_state = info.next_iwarp_state;
+}
+
+/**
+ * irdma_del_local_mac_entry - remove a mac entry from the hw
+ * table
+ * @rf: RDMA PCI function
+ * @idx: the index of the mac ip address to delete
+ */
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
+ cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+}
+
+/**
+ * irdma_add_local_mac_entry - add a mac ip address entry to the
+ * hw table
+ * @rf: RDMA PCI function
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
+{
+ struct irdma_local_mac_entry_info *info;
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ info = &cqp_info->in.u.add_local_mac_entry.info;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ info->entry_idx = idx;
+ cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
+ cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_local_mac_entry - allocate a mac entry
+ * @rf: RDMA PCI function
+ * @mac_tbl_idx: the index of the new mac address
+ *
+ * Allocate a mac address entry and update the mac_tbl_idx
+ * to hold the index of the newly created mac address
+ * Return 0 if successful, otherwise return error
+ */
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status = 0;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
+ * @iwdev: irdma device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+static enum irdma_status_code
+irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port,
+ bool add_port)
+{
+ struct irdma_apbvt_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_apbvt_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->add = add_port;
+ info->port = accel_local_port;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+ ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
+ (!add_port) ? "DELETE" : "ADD", accel_local_port);
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_add_apbvt - add tcp port to HW apbvt table
+ * @iwdev: irdma device
+ * @port: port for apbvt
+ */
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct irdma_apbvt_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ entry = irdma_lookup_apbvt_entry(cm_core, port);
+ if (entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return entry;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return NULL;
+ }
+
+ entry->port = port;
+ entry->use_cnt = 1;
+ hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+
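+ /* On CQP failure, unpublish the entry before freeing it so a
+ * concurrent lookup cannot return a stale pointer.
+ */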
+ if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ hash_del(&entry->hlist);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+/**
+ * irdma_del_apbvt - delete tcp port from HW apbvt table
+ * @iwdev: irdma device
+ * @entry: apbvt entry object
+ */
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ if (--entry->use_cnt) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return;
+ }
+
+ hash_del(&entry->hlist);
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
+ kfree(entry);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+}
+
+/**
+ * irdma_manage_arp_cache - manage hw arp cache
+ * @rf: RDMA PCI function
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4
+ * @action: add, delete or modify
+ */
+void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action)
+{
+ struct irdma_add_arp_cache_entry_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int arp_index;
+
+ arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
+ if (arp_index == -1)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == IRDMA_ARP_ADD) {
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = (u16)arp_index;
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->post_sq = 1;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ */
+static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+
+ irdma_send_syn(cm_node, 1);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_manage_qhash - add or modify qhash
+ * @iwdev: irdma device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+enum irdma_status_code
+irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait)
+{
+ struct irdma_qhash_table_info *info;
+ enum irdma_status_code status;
+ struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cm_node *cm_node = cmnode;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_qhash_table_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->vsi = &iwdev->vsi;
+ info->manage = mtype;
+ info->entry_type = etype;
+ if (cminfo->vlan_id < VLAN_N_VID) {
+ info->vlan_valid = true;
+ info->vlan_id = cminfo->vlan_id;
+ } else {
+ info->vlan_valid = false;
+ }
+ info->ipv4_valid = cminfo->ipv4;
+ info->user_pri = cminfo->user_pri;
+ ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+ info->qp_num = cminfo->qh_qpid;
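+ /* The local address/port programs the "dest" fields: the qhash entry
+ * matches inbound packets addressed to this endpoint.
+ */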
+ info->dest_port = cminfo->loc_port;
+ info->dest_ip[0] = cminfo->loc_addr[0];
+ info->dest_ip[1] = cminfo->loc_addr[1];
+ info->dest_ip[2] = cminfo->loc_addr[2];
+ info->dest_ip[3] = cminfo->loc_addr[3];
+ if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
+ etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
+ etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
+ info->src_port = cminfo->rem_port;
+ info->src_ip[0] = cminfo->rem_addr[0];
+ info->src_ip[1] = cminfo->rem_addr[1];
+ info->src_ip[2] = cminfo->rem_addr[2];
+ info->src_ip[3] = cminfo->rem_addr[3];
+ }
+ if (cmnode) {
+ cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
+ cqp_request->param = cmnode;
+ if (!wait)
+ refcount_inc(&cm_node->refcnt);
+ }
+ if (info->ipv4_valid)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode);
+
+ cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
+ cqp_info->post_sq = 1;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ if (status && cm_node && !wait)
+ irdma_rem_ref_cm_node(cm_node);
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_hw_flush_wqes_callback - Check return code after flush
+ * @cqp_request: flush QP cqp completion
+ */
+static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_sc_qp *qp;
+ struct irdma_qp *iwqp;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_info->in.u.qp_flush_wqes.info;
+ qp = cqp_info->in.u.qp_flush_wqes.qp;
+ iwqp = qp->qp_uk.back_qp;
+
+ if (cqp_request->compl_info.maj_err_code)
+ return;
+
+ if (hw_info->rq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ if (hw_info->sq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
+ qp->qp_uk.qp_id);
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ qp->qp_uk.sq_flush_complete = true;
+ }
+}
+
+/**
+ * irdma_hw_flush_wqes - flush qp's wqe
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info,
+ bool wait)
+{
+ enum irdma_status_code status;
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp = qp->qp_uk.back_qp;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
+ hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status) {
+ qp->qp_uk.sq_flush_complete = true;
+ qp->qp_uk.rq_flush_complete = true;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ return status;
+ }
+
+ if (!wait || cqp_request->compl_info.maj_err_code)
+ goto put_cqp;
+
+ if (info->rq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ }
+ if (info->sq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /*
+ * Handling case where WQE is posted to empty SQ when
+ * flush has not completed
+ */
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ struct irdma_cqp_request *new_req;
+
+ if (!qp->qp_uk.sq_flush_complete)
+ goto put_cqp;
+ qp->qp_uk.sq_flush_complete = false;
+ qp->flush_sq = false;
+
+ info->rq = false;
+ info->sq = true;
+ new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!new_req) {
+ status = IRDMA_ERR_NO_MEMORY;
+ goto put_cqp;
+ }
+ cqp_info = &new_req->info;
+ hw_info = &new_req->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
+
+ status = irdma_handle_cqp_op(rf, new_req);
+ if (new_req->compl_info.maj_err_code ||
+ new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ status) {
+ ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
+ iwqp->ibqp.qp_num);
+ qp->qp_uk.sq_flush_complete = false;
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ irdma_put_cqp_request(&rf->cqp, new_req);
+ } else {
+ /* SQ WQE flush was requested but did not happen */
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ }
+
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
+ iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
+ iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+put_cqp:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_gen_ae - generate AE
+ * @rf: RDMA PCI function
+ * @qp: qp associated with AE
+ * @info: info for ae
+ * @wait: wait for completion
+ */
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait)
+{
+ struct irdma_gen_ae_info *ae_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ ae_info = &cqp_request->info.in.u.gen_ae.info;
+ memcpy(ae_info, info, sizeof(*ae_info));
+ cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.gen_ae.qp = qp;
+ cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
+{
+ struct irdma_qp_flush_info info = {};
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ u8 flush_code = iwqp->sc_qp.flush_code;
+
+ if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
+ return;
+
+ /* Set flush info fields */
+ info.sq = flush_mask & IRDMA_FLUSH_SQ;
+ info.rq = flush_mask & IRDMA_FLUSH_RQ;
+
+ if (flush_mask & IRDMA_REFLUSH) {
+ if (info.sq)
+ iwqp->sc_qp.flush_sq = false;
+ if (info.rq)
+ iwqp->sc_qp.flush_rq = false;
+ }
+
+ /* Generate userflush errors in CQE */
+ info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.sq_minor_code = FLUSH_GENERAL_ERR;
+ info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.rq_minor_code = FLUSH_GENERAL_ERR;
+ info.userflushcode = true;
+ if (flush_code) {
+ if (info.sq && iwqp->sc_qp.sq_flush_code)
+ info.sq_minor_code = flush_code;
+ if (info.rq && iwqp->sc_qp.rq_flush_code)
+ info.rq_minor_code = flush_code;
+ }
+
+ /* Issue flush */
+ (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
+ flush_mask & IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = true;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
new file mode 100644
index 000000000000..64148ad8a604
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "i40iw_hw.h"
+#include "status.h"
+#include "protos.h"
+
+static u32 i40iw_regs[IRDMA_MAX_REGS] = {
+ I40E_PFPE_CQPTAIL,
+ I40E_PFPE_CQPDB,
+ I40E_PFPE_CCQPSTATUS,
+ I40E_PFPE_CCQPHIGH,
+ I40E_PFPE_CCQPLOW,
+ I40E_PFPE_CQARM,
+ I40E_PFPE_CQACK,
+ I40E_PFPE_AEQALLOC,
+ I40E_PFPE_CQPERRCODES,
+ I40E_PFPE_WQEALLOC,
+ I40E_PFINT_DYN_CTLN(0),
+ I40IW_DB_ADDR_OFFSET,
+
+ I40E_GLPCI_LBARCTRL,
+ I40E_GLPE_CPUSTATUS0,
+ I40E_GLPE_CPUSTATUS1,
+ I40E_GLPE_CPUSTATUS2,
+ I40E_PFINT_AEQCTL,
+ I40E_PFINT_CEQCTL(0),
+ I40E_VSIQF_CTL(0),
+ I40E_PFHMC_PDINV,
+ I40E_GLHMC_VFPDINV(0),
+ I40E_GLPE_CRITERR,
+ 0xffffffff /* PFINT_RATEN not used in FPK */
+};
+
+static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
+ I40E_GLPES_PFIP4RXDISCARD(0),
+ I40E_GLPES_PFIP4RXTRUNC(0),
+ I40E_GLPES_PFIP4TXNOROUTE(0),
+ I40E_GLPES_PFIP6RXDISCARD(0),
+ I40E_GLPES_PFIP6RXTRUNC(0),
+ I40E_GLPES_PFIP6TXNOROUTE(0),
+ I40E_GLPES_PFTCPRTXSEG(0),
+ I40E_GLPES_PFTCPRXOPTERR(0),
+ I40E_GLPES_PFTCPRXPROTOERR(0),
+ I40E_GLPES_PFRXVLANERR(0)
+};
+
+static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = {
+ I40E_GLPES_PFIP4RXOCTSLO(0),
+ I40E_GLPES_PFIP4RXPKTSLO(0),
+ I40E_GLPES_PFIP4RXFRAGSLO(0),
+ I40E_GLPES_PFIP4RXMCPKTSLO(0),
+ I40E_GLPES_PFIP4TXOCTSLO(0),
+ I40E_GLPES_PFIP4TXPKTSLO(0),
+ I40E_GLPES_PFIP4TXFRAGSLO(0),
+ I40E_GLPES_PFIP4TXMCPKTSLO(0),
+ I40E_GLPES_PFIP6RXOCTSLO(0),
+ I40E_GLPES_PFIP6RXPKTSLO(0),
+ I40E_GLPES_PFIP6RXFRAGSLO(0),
+ I40E_GLPES_PFIP6RXMCPKTSLO(0),
+ I40E_GLPES_PFIP6TXOCTSLO(0),
+ I40E_GLPES_PFIP6TXPKTSLO(0),
+ I40E_GLPES_PFIP6TXFRAGSLO(0),
+ I40E_GLPES_PFIP6TXMCPKTSLO(0),
+ I40E_GLPES_PFTCPRXSEGSLO(0),
+ I40E_GLPES_PFTCPTXSEGLO(0),
+ I40E_GLPES_PFRDMARXRDSLO(0),
+ I40E_GLPES_PFRDMARXSNDSLO(0),
+ I40E_GLPES_PFRDMARXWRSLO(0),
+ I40E_GLPES_PFRDMATXRDSLO(0),
+ I40E_GLPES_PFRDMATXSNDSLO(0),
+ I40E_GLPES_PFRDMATXWRSLO(0),
+ I40E_GLPES_PFRDMAVBNDLO(0),
+ I40E_GLPES_PFRDMAVINVLO(0),
+ I40E_GLPES_PFIP4RXMCOCTSLO(0),
+ I40E_GLPES_PFIP4TXMCOCTSLO(0),
+ I40E_GLPES_PFIP6RXMCOCTSLO(0),
+ I40E_GLPES_PFIP6TXMCOCTSLO(0),
+ I40E_GLPES_PFUDPRXPKTSLO(0),
+ I40E_GLPES_PFUDPTXPKTSLO(0)
+};
+
+static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR,
+ I40E_CQPSQ_STAG_PDID,
+ I40E_CQPSQ_CQ_CEQID,
+ I40E_CQPSQ_CQ_CQID,
+ I40E_COMMIT_FPM_CQCNT,
+};
+
+static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
+ I40E_CQPSQ_STAG_PDID_S,
+ I40E_CQPSQ_CQ_CEQID_S,
+ I40E_CQPSQ_CQ_CQID_S,
+ I40E_COMMIT_FPM_CQCNT_S,
+};
+
+/**
+ * i40iw_config_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: Enable CEQ interrupt when true
+ */
+static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
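+ /* PFINT_LNKLSTN/PFINT_DYN_CTLN register arrays cover MSI-X vectors
+ * 1..511; vector 0 has dedicated registers, hence the idx - 1.
+ */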
+ reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
+ FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
+ wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
+ wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);
+
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);
+}
+
+/**
+ * i40iw_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);
+}
+
+/**
+ * i40iw_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);
+}
+
+static const struct irdma_irq_ops i40iw_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = i40iw_config_ceq,
+ .irdma_dis_irq = i40iw_disable_irq,
+ .irdma_en_irq = i40iw_ena_irq,
+};
+
+void i40iw_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
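+ /* The doorbell entry is a raw offset, not a register to be based */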
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
+ }
+
+ for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)
+ dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];
+
+ for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)
+ dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i];
+
+ dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = i40iw_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = i40iw_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->ceq_itr_mask_db = NULL;
+ dev->aeq_itr_mask_db = NULL;
+ dev->irq_ops = &i40iw_irq_ops;
+
+ /* Set up the hardware limits; hmc may limit further */
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
+ dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
+ dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
+ dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
new file mode 100644
index 000000000000..1c438b3593ea
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef I40IW_HW_H
+#define I40IW_HW_H
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CRITERR 0x000B4000 /* Reset: PE_CORER */
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
+
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX GENMASK(10, 0)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE GENMASK(12, 11)
+
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define I40E_PFINT_CEQCTL_MSIX_INDX_S 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX GENMASK(7, 0)
+#define I40E_PFINT_CEQCTL_ITR_INDX_S 11
+#define I40E_PFINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_S 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX GENMASK(15, 13)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_S 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX GENMASK(26, 16)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_S 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE GENMASK(28, 27)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_S 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA BIT(30)
+#define I40E_PFINT_CEQCTL_INTEVENT_S 31
+#define I40E_PFINT_CEQCTL_INTEVENT BIT(31)
+#define I40E_CQPSQ_STAG_PDID_S 48
+#define I40E_CQPSQ_STAG_PDID GENMASK_ULL(62, 48)
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_S 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_S 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_S 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX GENMASK(4, 3)
+#define I40E_PFINT_DYN_CTLN_INTENA_S 0
+#define I40E_PFINT_DYN_CTLN_INTENA BIT(0)
+#define I40E_CQPSQ_CQ_CEQID_S 24
+#define I40E_CQPSQ_CQ_CEQID GENMASK(30, 24)
+#define I40E_CQPSQ_CQ_CQID_S 0
+#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
+#define I40E_COMMIT_FPM_CQCNT_S 0
+#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
+
+enum i40iw_device_caps_const {
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_PUSH_PAGE_COUNT = 0,
+ I40IW_MAX_INLINE_DATA_SIZE = 48,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_MAX_PDS = 32768,
+ I40IW_MAX_STATS_COUNT = 16,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+};
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+#define I40IW_QP_SW_MIN_WQSIZE 4
+#define I40IW_MAX_RQ_WQE_SHIFT 2
+#define I40IW_MAX_QUANTA_PER_WR 2
+
+#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
+#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTA - IRDMA_SQ_RSVD) / I40IW_MAX_QUANTA_PER_WR)
+#define I40IW_FIRST_VF_FPM_ID 16
+#define QUEUE_TYPE_CEQ 2
+#define NULL_QUEUE_INDEX 0x7FF
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+#endif /* I40IW_HW_H */
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
new file mode 100644
index 000000000000..bddf88194d09
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "i40iw_hw.h"
+#include <linux/net/intel/i40e_client.h>
+
+static struct i40e_client i40iw_client;
+
+/**
+ * i40iw_l2param_change - handle MTU change notification from the LAN driver
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *cdev_info,
+ struct i40e_client *client,
+ struct i40e_params *params)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return;
+
+ iwdev = to_iwdev(ibdev);
+
+ if (iwdev->vsi.mtu != params->mtu) {
+ l2params.mtu_changed = true;
+ l2params.mtu = params->mtu;
+ }
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ ib_device_put(ibdev);
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client to close
+ * @reset: flag to indicate close on reset
+ *
+ * Called by the LAN driver during processing of a client unregister.
+ * Destroys and cleans up the driver resources.
+ */
+static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
+ bool reset)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (WARN_ON(!ibdev))
+ return;
+
+ iwdev = to_iwdev(ibdev);
+ if (reset)
+ iwdev->reset = true;
+
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device_and_put(ibdev);
+ pr_debug("INIT: Gen1 PF[%d] close complete\n", PCI_FUNC(cdev_info->pcidev->devfn));
+}
+
+static void i40iw_request_reset(struct irdma_pci_f *rf)
+{
+ struct i40e_info *cdev_info = rf->cdev;
+
+ cdev_info->ops->request_reset(cdev_info, &i40iw_client, 1);
+}
+
+static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info *cdev_info)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->rdma_ver = IRDMA_GEN_1;
+ rf->gen_ops.request_reset = i40iw_request_reset;
+ rf->pcidev = cdev_info->pcidev;
+ rf->hw.hw_addr = cdev_info->hw_addr;
+ rf->cdev = cdev_info;
+ rf->msix_count = cdev_info->msix_count;
+ rf->msix_entries = cdev_info->msix_entries;
+ rf->limits_sel = 5;
+ rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->iwdev = iwdev;
+
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ iwdev->netdev = cdev_info->netdev;
+ iwdev->vsi_num = 0;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the LAN driver during processing of a client register.
+ * Creates device resources, sets up queues, PBLE and HMC objects, and
+ * registers the device with the IB verbs interface.
+ * Return: 0 on success, otherwise an error code
+ */
+static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err = -EIO;
+ int i;
+ u16 qset;
+ u16 last_qset = IRDMA_NO_QSET;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ i40iw_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ if (irdma_ctrl_init_hw(rf)) {
+ err = -EIO;
+ goto err_ctrl_init;
+ }
+
+ l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU;
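+ /* If user priorities map to more than one qset, multiple TCs are in use and DCB is enabled */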
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+ qset = cdev_info->params.qos.prio_qos[i].qs_handle;
+ l2params.up2tc[i] = cdev_info->params.qos.prio_qos[i].tc;
+ l2params.qs_handle_list[i] = qset;
+ if (last_qset == IRDMA_NO_QSET)
+ last_qset = qset;
+ else if ((qset != last_qset) && (qset != IRDMA_NO_QSET))
+ iwdev->dcb = true;
+ }
+
+ if (irdma_rt_init_hw(iwdev, &l2params)) {
+ err = -EIO;
+ goto err_rt_init;
+ }
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen1 PF[%d] open success\n",
+ PCI_FUNC(rf->pcidev->devfn));
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+/* client interface functions */
+static const struct i40e_client_ops i40e_ops = {
+ .open = i40iw_open,
+ .close = i40iw_close,
+ .l2_param_change = i40iw_l2param_change
+};
+
+static struct i40e_client i40iw_client = {
+ .ops = &i40e_ops,
+ .type = I40E_CLIENT_IWARP,
+};
+
+static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ strncpy(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH);
+ i40e_client_device_register(cdev_info, &i40iw_client);
+
+ return 0;
+}
+
+static void i40iw_remove(struct auxiliary_device *aux_dev)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ i40e_client_device_unregister(cdev_info);
+}
+
+static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
+ {.name = "i40e.iwarp", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, i40iw_auxiliary_id_table);
+
+struct auxiliary_driver i40iw_auxiliary_drv = {
+ .name = "gen_1",
+ .id_table = i40iw_auxiliary_id_table,
+ .probe = i40iw_probe,
+ .remove = i40iw_remove,
+};
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
new file mode 100644
index 000000000000..cf53b17510cd
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "icrdma_hw.h"
+
+static u32 icrdma_regs[IRDMA_MAX_REGS] = {
+ PFPE_CQPTAIL,
+ PFPE_CQPDB,
+ PFPE_CCQPSTATUS,
+ PFPE_CCQPHIGH,
+ PFPE_CCQPLOW,
+ PFPE_CQARM,
+ PFPE_CQACK,
+ PFPE_AEQALLOC,
+ PFPE_CQPERRCODES,
+ PFPE_WQEALLOC,
+ GLINT_DYN_CTL(0),
+ ICRDMA_DB_ADDR_OFFSET,
+
+ GLPCI_LBARCTRL,
+ GLPE_CPUSTATUS0,
+ GLPE_CPUSTATUS1,
+ GLPE_CPUSTATUS2,
+ PFINT_AEQCTL,
+ GLINT_CEQCTL(0),
+ VSIQF_PE_CTL1(0),
+ PFHMC_PDINV,
+ GLHMC_VFPDINV(0),
+ GLPE_CRITERR,
+ GLINT_RATE(0),
+};
+
+static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE,
+ ICRDMA_CCQPSTATUS_CCQP_ERR,
+ ICRDMA_CQPSQ_STAG_PDID,
+ ICRDMA_CQPSQ_CQ_CEQID,
+ ICRDMA_CQPSQ_CQ_CQID,
+ ICRDMA_COMMIT_FPM_CQCNT,
+};
+
+static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE_S,
+ ICRDMA_CCQPSTATUS_CCQP_ERR_S,
+ ICRDMA_CQPSQ_STAG_PDID_S,
+ ICRDMA_CQPSQ_CQ_CEQID_S,
+ ICRDMA_CQPSQ_CQ_CQID_S,
+ ICRDMA_COMMIT_FPM_CQCNT_S,
+};
+
+/**
+ * icrdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 interval = 0;
+
+ if (dev->ceq_itr && dev->aeq->msix_idx != idx)
+ interval = dev->ceq_itr >> 1; /* 2 usec units */
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
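+ /* Gen1 interrupt control registers start at MSI-X vector 1, so index by (vector - 1) */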
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
+
+/**
+ * icrdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
+
+/**
+ * icrdma_cfg_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: true to enable, false to disable
+ */
+static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
+
+ writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
+}
+
+static const struct irdma_irq_ops icrdma_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = icrdma_cfg_ceq,
+ .irdma_dis_irq = icrdma_disable_irq,
+ .irdma_en_irq = icrdma_ena_irq,
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
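+ /* IRDMA_DB_ADDR_OFFSET is not a register; keep the base NULL so the stored value is the raw doorbell offset */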
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
+ }
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+ dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = icrdma_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = icrdma_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+}
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
new file mode 100644
index 000000000000..b65c463abf0b
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef ICRDMA_HW_H
+#define ICRDMA_HW_H
+
+#include "irdma.h"
+
+#define VFPE_CQPTAIL1 0x0000a000
+#define VFPE_CQPDB1 0x0000bc00
+#define VFPE_CCQPSTATUS1 0x0000b800
+#define VFPE_CCQPHIGH1 0x00009800
+#define VFPE_CCQPLOW1 0x0000ac00
+#define VFPE_CQARM1 0x0000b400
+#define VFPE_CQACK1 0x0000b000
+#define VFPE_AEQALLOC1 0x0000a400
+#define VFPE_CQPERRCODES1 0x00009c00
+#define VFPE_WQEALLOC1 0x0000c000
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
+
+#define PFPE_CQPTAIL 0x00500880
+#define PFPE_CQPDB 0x00500800
+#define PFPE_CCQPSTATUS 0x0050a000
+#define PFPE_CCQPHIGH 0x0050a100
+#define PFPE_CCQPLOW 0x0050a080
+#define PFPE_CQARM 0x00502c00
+#define PFPE_CQACK 0x00502c80
+#define PFPE_AEQALLOC 0x00502d00
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _INT=0...2047 */
+#define GLPCI_LBARCTRL 0x0009de74
+#define GLPE_CPUSTATUS0 0x0050ba5c
+#define GLPE_CPUSTATUS1 0x0050ba60
+#define GLPE_CPUSTATUS2 0x0050ba64
+#define PFINT_AEQCTL 0x0016cb00
+#define PFPE_CQPERRCODES 0x0050a200
+#define PFPE_WQEALLOC 0x00504400
+#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _INT=0...2047 */
+#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _VSI=0...767 */
+#define PFHMC_PDINV 0x00520300
+#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
+#define GLPE_CRITERR 0x00534000
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _INT=0...2047 */ /* Reset Source: CORER */
+
+#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
+
+#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
+#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
+#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define ICRDMA_CQPSQ_STAG_PDID_S 46
+#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
+#define ICRDMA_CQPSQ_CQ_CEQID_S 22
+#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
+#define ICRDMA_CQPSQ_CQ_CQID_S 0
+#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
+#define ICRDMA_COMMIT_FPM_CQCNT_S 0
+#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
+
+enum icrdma_device_caps_const {
+ ICRDMA_MAX_STATS_COUNT = 128,
+
+ ICRDMA_MAX_IRD_SIZE = 127,
+ ICRDMA_MAX_ORD_SIZE = 255,
+
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+#endif /* ICRDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
new file mode 100644
index 000000000000..46c12334c735
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef IRDMA_H
+#define IRDMA_H
+
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
+
+#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
+#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
+
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
+#define IRDMA_GLINT_RATE_INTERVAL GENMASK(5, 0)
+#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
+#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
+#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
+#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
+#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
+#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
+
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
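+/* Logical register ids; each HW generation maps these to real offsets (see i40iw_init_hw() and icrdma_init_hw()) */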
+enum irdma_registers {
+ IRDMA_CQPTAIL,
+ IRDMA_CQPDB,
+ IRDMA_CCQPSTATUS,
+ IRDMA_CCQPHIGH,
+ IRDMA_CCQPLOW,
+ IRDMA_CQARM,
+ IRDMA_CQACK,
+ IRDMA_AEQALLOC,
+ IRDMA_CQPERRCODES,
+ IRDMA_WQEALLOC,
+ IRDMA_GLINT_DYN_CTL,
+ IRDMA_DB_ADDR_OFFSET,
+ IRDMA_GLPCI_LBARCTRL,
+ IRDMA_GLPE_CPUSTATUS0,
+ IRDMA_GLPE_CPUSTATUS1,
+ IRDMA_GLPE_CPUSTATUS2,
+ IRDMA_PFINT_AEQCTL,
+ IRDMA_GLINT_CEQCTL,
+ IRDMA_VSIQF_PE_CTL1,
+ IRDMA_PFHMC_PDINV,
+ IRDMA_GLHMC_VFPDINV,
+ IRDMA_GLPE_CRITERR,
+ IRDMA_GLINT_RATE,
+ IRDMA_MAX_REGS, /* Must be last entry */
+};
+
+enum irdma_shifts {
+ IRDMA_CCQPSTATUS_CCQP_DONE_S,
+ IRDMA_CCQPSTATUS_CCQP_ERR_S,
+ IRDMA_CQPSQ_STAG_PDID_S,
+ IRDMA_CQPSQ_CQ_CEQID_S,
+ IRDMA_CQPSQ_CQ_CQID_S,
+ IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_MAX_SHIFTS,
+};
+
+enum irdma_masks {
+ IRDMA_CCQPSTATUS_CCQP_DONE_M,
+ IRDMA_CCQPSTATUS_CCQP_ERR_M,
+ IRDMA_CQPSQ_STAG_PDID_M,
+ IRDMA_CQPSQ_CQ_CEQID_M,
+ IRDMA_CQPSQ_CQ_CQID_M,
+ IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_MAX_MASKS, /* Must be last entry */
+};
+
+#define IRDMA_MAX_MGS_PER_CTX 8
+
+struct irdma_mcast_grp_ctx_entry_info {
+ u32 qp_id;
+ bool valid_entry;
+ u16 dest_port;
+ u32 use_cnt;
+};
+
+struct irdma_mcast_grp_info {
+ u8 dest_mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 hmc_fcn_id;
+ bool ipv4_valid:1;
+ bool vlan_valid:1;
+ u16 mg_id;
+ u32 no_of_mgs;
+ u32 dest_ip_addr[4];
+ u16 qs_handle;
+ struct irdma_dma_mem dma_mem_mc;
+ struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
+};
+
+enum irdma_vers {
+ IRDMA_GEN_RSVD,
+ IRDMA_GEN_1,
+ IRDMA_GEN_2,
+};
+
+struct irdma_uk_attrs {
+ u64 feature_flags;
+ u32 max_hw_wq_frags;
+ u32 max_hw_read_sges;
+ u32 max_hw_inline;
+ u32 max_hw_rq_quanta;
+ u32 max_hw_wq_quanta;
+ u32 min_hw_cq_size;
+ u32 max_hw_cq_size;
+ u16 max_hw_sq_chunk;
+ u8 hw_rev;
+};
+
+struct irdma_hw_attrs {
+ struct irdma_uk_attrs uk_attrs;
+ u64 max_hw_outbound_msg_size;
+ u64 max_hw_inbound_msg_size;
+ u64 max_mr_size;
+ u32 min_hw_qp_id;
+ u32 min_hw_aeq_size;
+ u32 max_hw_aeq_size;
+ u32 min_hw_ceq_size;
+ u32 max_hw_ceq_size;
+ u32 max_hw_device_pages;
+ u32 max_hw_vf_fpm_id;
+ u32 first_hw_vf_fpm_id;
+ u32 max_hw_ird;
+ u32 max_hw_ord;
+ u32 max_hw_wqes;
+ u32 max_hw_pds;
+ u32 max_hw_ena_vf_count;
+ u32 max_qp_wr;
+ u32 max_pe_ready_count;
+ u32 max_done_count;
+ u32 max_sleep_count;
+ u32 max_cqp_compl_wait_time_ms;
+ u16 max_stat_inst;
+};
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+#endif /* IRDMA_H */
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
new file mode 100644
index 000000000000..ea59432351fb
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "../../../net/ethernet/intel/ice/ice.h"
+
+MODULE_ALIAS("i40iw");
+MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct notifier_block irdma_inetaddr_notifier = {
+ .notifier_call = irdma_inetaddr_event
+};
+
+static struct notifier_block irdma_inetaddr6_notifier = {
+ .notifier_call = irdma_inet6addr_event
+};
+
+static struct notifier_block irdma_net_notifier = {
+ .notifier_call = irdma_net_event
+};
+
+static struct notifier_block irdma_netdevice_notifier = {
+ .notifier_call = irdma_netdevice_event
+};
+
+static void irdma_register_notifiers(void)
+{
+ register_inetaddr_notifier(&irdma_inetaddr_notifier);
+ register_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ register_netevent_notifier(&irdma_net_notifier);
+ register_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+static void irdma_unregister_notifiers(void)
+{
+ unregister_netevent_notifier(&irdma_net_notifier);
+ unregister_inetaddr_notifier(&irdma_inetaddr_notifier);
+ unregister_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ unregister_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+static void irdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ IRDMA_EVENT_TIMEOUT);
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+{
+ if (mtu < IRDMA_MIN_MTU_IPV4)
+ ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
+ else if (mtu < IRDMA_MIN_MTU_IPV6)
+ ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
+}
+
+static void irdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+}
+
+static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ irdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_qos_params qos_info = {};
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+ ice_get_qos_params(pf, &qos_info);
+ iwdev->dcb = qos_info.num_tc > 1;
+ irdma_fill_qos_info(&l2params, &qos_info);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
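+ /* Q1 resource errors are recoverable; any other PE critical error triggers a reset */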
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * irdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void irdma_request_reset(struct irdma_pci_f *rf)
+{
+ struct ice_pf *pf = rf->cdev;
+
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(pf, IIDC_PFR);
+}
+
+/**
+ * irdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(pf, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return IRDMA_ERR_REG_QSET;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * irdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(pf, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+static void irdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
+ struct iidc_auxiliary_dev,
+ adev);
+ struct ice_pf *pf = iidc_adev->pf;
+ struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev);
+
+ irdma_ib_unregister_device(iwdev);
+ ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
+
+ pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
+}
+
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ rf->cdev = pf;
+ rf->gen_ops.register_qset = irdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
+ rf->hw.hw_addr = pf->hw.hw_addr;
+ rf->pcidev = pf->pdev;
+ rf->msix_count = pf->num_rdma_msix;
+ rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
+ rf->default_vsi.vsi_idx = vsi->vsi_num;
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = irdma_request_reset;
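+ /* limits_sel selects the HW resource limits profile for this generation */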
+ rf->limits_sel = 7;
+ rf->iwdev = iwdev;
+
+ iwdev->netdev = vsi->netdev;
+ iwdev->vsi_num = vsi->vsi_num;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
+ struct iidc_auxiliary_dev,
+ adev);
+ struct ice_pf *pf = iidc_adev->pf;
+ struct iidc_qos_params qos_info = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_l2params l2params = {};
+ int err;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ irdma_fill_device_info(iwdev, pf);
+ rf = iwdev->rf;
+
+ if (irdma_ctrl_init_hw(rf)) {
+ err = -EIO;
+ goto err_ctrl_init;
+ }
+
+ l2params.mtu = iwdev->netdev->mtu;
+ ice_get_qos_params(pf, &qos_info);
+ irdma_fill_qos_info(&l2params, &qos_info);
+ if (irdma_rt_init_hw(iwdev, &l2params)) {
+ err = -EIO;
+ goto err_rt_init;
+ }
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ dev_set_drvdata(&aux_dev->dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
+
+static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+ .adrv = {
+ .id_table = irdma_auxiliary_id_table,
+ .probe = irdma_probe,
+ .remove = irdma_remove,
+ },
+ .event_handler = irdma_iidc_event_handler,
+};
+
+static int __init irdma_init_module(void)
+{
+ int ret;
+
+ ret = auxiliary_driver_register(&i40iw_auxiliary_drv);
+ if (ret) {
+ pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ irdma_register_notifiers();
+
+ return 0;
+}
+
+static void __exit irdma_exit_module(void)
+{
+ irdma_unregister_notifiers();
+ auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+}
+
+module_init(irdma_init_module);
+module_exit(irdma_exit_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
new file mode 100644
index 000000000000..743d9e143a99
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -0,0 +1,555 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_MAIN_H
+#define IRDMA_MAIN_H
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include <net/netevent.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include <net/secure_seq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <linux/kthread.h>
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>
+#endif
+#include <linux/auxiliary_bus.h>
+#include <linux/net/intel/iidc.h>
+#include <crypto/hash.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
+#include "status.h"
+#include "osdep.h"
+#include "defs.h"
+#include "hmc.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+#include "pble.h"
+#include "cm.h"
+#include <rdma/irdma-abi.h>
+#include "verbs.h"
+#include "user.h"
+#include "puda.h"
+
+extern struct auxiliary_driver i40iw_auxiliary_drv;
+
+#define IRDMA_FW_VER_DEFAULT 2
+#define IRDMA_HW_VER 2
+
+#define IRDMA_ARP_ADD 1
+#define IRDMA_ARP_DELETE 2
+#define IRDMA_ARP_RESOLVE 3
+
+#define IRDMA_MACIP_ADD 1
+#define IRDMA_MACIP_DELETE 2
+
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define IRDMA_EVENT_TIMEOUT 50000
+#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+#define IRDMA_RST_TIMEOUT_HZ 4
+
+#define IRDMA_NO_QSET 0xffff
+
+#define IW_CFG_FPM_QP_COUNT 32768
+#define IRDMA_MAX_PAGES_PER_FMR 512
+#define IRDMA_MIN_PAGES_PER_FMR 1
+#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
+
+#define IRDMA_Q_TYPE_PE_AEQ 0x80
+#define IRDMA_Q_INVALID_IDX 0xffff
+#define IRDMA_REM_ENDPOINT_TRK_QPID 3
+
+#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
+#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
+#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
+#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
+#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IRDMA_ROCE_CWND_DEFAULT 0x400
+#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
+
+#define IRDMA_FLUSH_SQ BIT(0)
+#define IRDMA_FLUSH_RQ BIT(1)
+#define IRDMA_REFLUSH BIT(2)
+#define IRDMA_FLUSH_WAIT BIT(3)
+
+enum init_completion_state {
+ INVALID_STATE = 0,
+ INITIAL_STATE,
+ CQP_CREATED,
+ HMC_OBJS_CREATED,
+ HW_RSRC_INITIALIZED,
+ CCQ_CREATED,
+ CEQ0_CREATED, /* Last state of probe */
+ ILQ_CREATED,
+ IEQ_CREATED,
+ CEQS_CREATED,
+ PBLE_CHUNK_MEM,
+ AEQ_CREATED,
+ IP_ADDR_REGISTERED, /* Last state of open */
+};
+
+struct irdma_rsrc_limits {
+ u32 qplimit;
+ u32 mrlimit;
+ u32 cqlimit;
+};
+
+struct irdma_cqp_err_info {
+ u16 maj;
+ u16 min;
+ const char *desc;
+};
+
+struct irdma_cqp_compl_info {
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ bool error;
+ u8 op_code;
+};
+
+struct irdma_cqp_request {
+ struct cqp_cmds_info info;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ refcount_t refcnt;
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
+ void *param;
+ struct irdma_cqp_compl_info compl_info;
+ bool waiting:1;
+ bool request_done:1;
+ bool dynamic:1;
+};
+
+struct irdma_cqp {
+ struct irdma_sc_cqp sc_cqp;
+ spinlock_t req_lock; /* protect CQP request list */
+ spinlock_t compl_lock; /* protect CQP completion processing */
+ wait_queue_head_t waitq;
+ wait_queue_head_t remove_wq;
+ struct irdma_dma_mem sq;
+ struct irdma_dma_mem host_ctx;
+ u64 *scratch_array;
+ struct irdma_cqp_request *cqp_requests;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+};
+
+struct irdma_ccq {
+ struct irdma_sc_cq sc_cq;
+ struct irdma_dma_mem mem_cq;
+ struct irdma_dma_mem shadow_area;
+};
+
+struct irdma_ceq {
+ struct irdma_sc_ceq sc_ceq;
+ struct irdma_dma_mem mem;
+ u32 irq;
+ u32 msix_idx;
+ struct irdma_pci_f *rf;
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
+};
+
+struct irdma_aeq {
+ struct irdma_sc_aeq sc_aeq;
+ struct irdma_dma_mem mem;
+ struct irdma_pble_alloc palloc;
+ bool virtual_map;
+};
+
+struct irdma_arp_entry {
+ u32 ip_addr[4];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct irdma_msix_vector {
+ u32 idx;
+ u32 irq;
+ u32 cpu_affinity;
+ u32 ceq_id;
+ cpumask_t mask;
+};
+
+struct irdma_mc_table_info {
+ u32 mgn;
+ u32 dest_ip[4];
+ bool lan_fwd:1;
+ bool ipv4_valid:1;
+};
+
+struct mc_table_list {
+ struct list_head list;
+ struct irdma_mc_table_info mc_info;
+ struct irdma_mcast_grp_info mc_grp_ctx;
+};
+
+struct irdma_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_qvlist_info {
+ u32 num_vectors;
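+ /* variable length; num_vectors entries are allocated at the tail */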
+ struct irdma_qv_info qv_info[1];
+};
+
+struct irdma_gen_ops {
+ void (*request_reset)(struct irdma_pci_f *rf);
+ enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_pci_f {
+ bool reset:1;
+ bool rsrc_created:1;
+ bool msix_shared:1;
+ u8 rsrc_profile;
+ u8 *hmc_info_mem;
+ u8 *mem_rsrc;
+ u8 rdma_ver;
+ u8 rst_to;
+ enum irdma_protocol_used protocol_used;
+ u32 sd_type;
+ u32 msix_count;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_ah;
+ u32 next_ah;
+ u32 max_mcg;
+ u32 next_mcg;
+ u32 max_pd;
+ u32 next_qp;
+ u32 next_cq;
+ u32 next_pd;
+ u32 max_mr_size;
+ u32 max_cqe;
+ u32 mr_stagmask;
+ u32 used_pds;
+ u32 used_cqs;
+ u32 used_mrs;
+ u32 used_qps;
+ u32 arp_table_size;
+ u32 next_arp_index;
+ u32 ceqs_count;
+ u32 next_ws_node_id;
+ u32 max_ws_node_id;
+ u32 limits_sel;
+ unsigned long *allocated_ws_nodes;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_mcgs;
+ unsigned long *allocated_ahs;
+ unsigned long *allocated_arps;
+ enum init_completion_state init_state;
+ struct irdma_sc_dev sc_dev;
+ struct pci_dev *pcidev;
+ void *cdev;
+ struct irdma_hw hw;
+ struct irdma_cqp cqp;
+ struct irdma_ccq ccq;
+ struct irdma_aeq aeq;
+ struct irdma_ceq *ceqlist;
+ struct irdma_hmc_pble_rsrc *pble_rsrc;
+ struct irdma_arp_entry *arp_table;
+ spinlock_t arp_lock; /* protect ARP table access */
+ spinlock_t rsrc_lock; /* protect HW resource array access */
+ spinlock_t qptable_lock; /* protect QP table access */
+ struct irdma_qp **qp_table;
+ spinlock_t qh_list_lock; /* protect mc_qht_list */
+ struct mc_table_list mc_qht_list;
+ struct irdma_msix_vector *iw_msixtbl;
+ struct irdma_qvlist_info *iw_qvlist;
+ struct tasklet_struct dpc_tasklet;
+ struct msix_entry *msix_entries;
+ struct irdma_dma_mem obj_mem;
+ struct irdma_dma_mem obj_next;
+ atomic_t vchnl_msgs;
+ wait_queue_head_t vchnl_waitq;
+ struct workqueue_struct *cqp_cmpl_wq;
+ struct work_struct cqp_cmpl_work;
+ struct irdma_sc_vsi default_vsi;
+ void *back_fcn;
+ struct irdma_gen_ops gen_ops;
+ struct irdma_device *iwdev;
+};
+
+struct irdma_device {
+ struct ib_device ibdev;
+ struct irdma_pci_f *rf;
+ struct net_device *netdev;
+ struct workqueue_struct *cleanup_wq;
+ struct irdma_sc_vsi vsi;
+ struct irdma_cm_core cm_core;
+ u32 roce_cwnd;
+ u32 roce_ackcreds;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 device_cap_flags;
+ u32 push_mode;
+ u32 rcv_wnd;
+ u16 mac_ip_table_idx;
+ u16 vsi_num;
+ u8 rcv_wscale;
+ u8 iw_status;
+ bool roce_mode:1;
+ bool roce_dcqcn_en:1;
+ bool dcb:1;
+ bool reset:1;
+ bool iw_ooo:1;
+ enum init_completion_state init_state;
+
+ wait_queue_head_t suspend_wq;
+};
+
+static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct irdma_device, ibdev);
+}
+
+static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct irdma_ucontext, ibucontext);
+}
+
+static inline struct irdma_user_mmap_entry *
+to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct irdma_user_mmap_entry,
+ rdma_entry);
+}
+
+static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct irdma_pd, ibpd);
+}
+
+static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct irdma_ah, ibah);
+}
+
+static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct irdma_mr, ibmr);
+}
+
+static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct irdma_mr, ibmw);
+}
+
+static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct irdma_cq, ibcq);
+}
+
+static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct irdma_qp, ibqp);
+}
+
+static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
+{
+ return container_of(dev, struct irdma_pci_f, sc_dev);
+}
+
+/**
+ * irdma_alloc_rsrc - allocate a hw resource id
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array
+ * @max_rsrc: maximum resource number
+ * @req_rsrc_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 max_rsrc,
+ u32 *req_rsrc_num, u32 *next)
+{
+ u32 rsrc_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
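+ /* nothing free at or after the hint; wrap around and search from the start */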
+ if (rsrc_num >= max_rsrc) {
+ rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
+ if (rsrc_num >= max_rsrc) {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: resource [%d] allocation failed\n",
+ rsrc_num);
+ return -EOVERFLOW;
+ }
+ }
+ __set_bit(rsrc_num, rsrc_array);
+ *next = rsrc_num + 1;
+ if (*next == max_rsrc)
+ *next = 0;
+ *req_rsrc_num = rsrc_num;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_free_rsrc - free a resource
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array for the resource number
+ * @rsrc_num: resource number to free
+ **/
+static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 rsrc_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ __clear_bit(rsrc_num, rsrc_array);
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+}
+
+enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf);
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
+enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params);
+void irdma_rt_deinit_hw(struct irdma_device *iwdev);
+void irdma_qp_add_ref(struct ib_qp *ibqp);
+void irdma_qp_rem_ref(struct ib_qp *ibqp);
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
+struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
+void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action);
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry);
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait);
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
+
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
+void irdma_port_ibevent(struct irdma_device *iwdev);
+void irdma_cm_disconn(struct irdma_qp *qp);
+
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code);
+enum irdma_status_code
+irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request);
+
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata);
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
+enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info,
+ bool wait);
+enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp,
+ bool suspend);
+enum irdma_status_code
+irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait);
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
+enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 term_len);
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
+int irdma_send_reset(struct irdma_cm_node *cm_node);
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id);
+enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info,
+ bool wait);
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait);
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
+u16 irdma_get_vlan_ipv4(u32 *addr);
+struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
+ int acc, u64 *iova_start);
+int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
+ void *cb_param);
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+void irdma_add_ip(struct irdma_device *iwdev);
+void cqp_compl_worker(struct work_struct *work);
+#endif /* IRDMA_MAIN_H */
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
new file mode 100644
index 000000000000..b2ab52335ca6
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_OSDEP_H
+#define IRDMA_OSDEP_H
+
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <crypto/hash.h>
+#include <rdma/ib_verbs.h>
+
+#define STATS_TIMER_DELAY 60000
+
+struct irdma_dma_info {
+ dma_addr_t *dmaaddrs;
+};
+
+struct irdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+struct irdma_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+struct irdma_sc_vsi;
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_puda_buf;
+struct irdma_puda_cmpl_info;
+struct irdma_update_sds_info;
+struct irdma_hmc_fcn_info;
+struct irdma_manage_vf_pble_info;
+struct irdma_hw;
+struct irdma_pci_f;
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
+u8 __iomem *irdma_get_hw_addr(void *dev);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
+bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr, u32 len, u32 val);
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum);
+void irdma_free_hash_desc(struct shash_desc *hash_desc);
+enum irdma_status_code irdma_init_hash_desc(struct shash_desc **hash_desc);
+enum irdma_status_code
+irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+enum irdma_status_code
+irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+enum irdma_status_code
+irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+enum irdma_status_code
+irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len);
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred);
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp);
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp);
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi);
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
+void wr32(struct irdma_hw *hw, u32 reg, u32 val);
+u32 rd32(struct irdma_hw *hw, u32 reg);
+u64 rd64(struct irdma_hw *hw, u32 reg);
+enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
+ dma_addr_t *pg_dma, u32 pg_cnt);
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
+#endif /* IRDMA_OSDEP_H */
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
new file mode 100644
index 000000000000..aeeb1c310965
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "pble.h"
+
+static enum irdma_status_code
+add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+
+/**
+ * irdma_destroy_pble_prm - destroy prm during module unload
+ * @pble_rsrc: pble resources
+ */
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_chunk *chunk;
+ struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;
+
+ while (!list_empty(&pinfo->clist)) {
+ chunk = list_first_entry(&pinfo->clist, struct irdma_chunk, list);
+ list_del(&chunk->list);
+ if (chunk->type == PBLE_SD_PAGED)
+ irdma_pble_free_paged_mem(chunk);
+ if (chunk->bitmapbuf)
+ kfree(chunk->bitmapmem.va);
+ kfree(chunk->chunkmem.va);
+ }
+}
+
+/**
+ * irdma_hmc_init_pble - Initialize pble resources during module load
+ * @dev: irdma_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+enum irdma_status_code
+irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_hmc_info *hmc_info;
+ u32 fpm_idx = 0;
+ enum irdma_status_code status = 0;
+
+ hmc_info = dev->hmc_info;
+ pble_rsrc->dev = dev;
+ pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
+ /* Start PBLEs on a 4K boundary */
+ if (pble_rsrc->fpm_base_addr & 0xfff)
+ fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+ pble_rsrc->unallocated_pble =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
+ pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+ pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;
+
+ mutex_init(&pble_rsrc->pble_mutex_lock);
+
+ spin_lock_init(&pble_rsrc->pinfo.prm_lock);
+ INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+ if (add_pble_prm(pble_rsrc)) {
+ irdma_destroy_pble_prm(pble_rsrc);
+ status = IRDMA_ERR_NO_MEMORY;
+ }
+
+ return status;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+{
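+ /* An SD maps IRDMA_HMC_PD_CNT_IN_SD 4KB backing pages; rel_pd_idx is the page offset within its SD */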
+ idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static enum irdma_status_code
+add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ enum irdma_status_code ret_code = 0;
+ struct sd_pd_idx *idx = &info->idx;
+ struct irdma_chunk *chunk = info->chunk;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ u32 offset = 0;
+
+ if (!sd_entry->valid) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx,
+ IRDMA_SD_TYPE_DIRECT,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ return ret_code;
+
+ chunk->type = PBLE_SD_CONTIGOUS;
+ }
+
+ offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
+ chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
+ chunk->vaddr = sd_entry->u.bp.addr.va + offset;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+
+ return 0;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
+ u64 idx;
+
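+ /* PBLEs are 8 bytes each; convert the byte offset from the base to an index */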
+ idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+
+ return (u32)idx;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static enum irdma_status_code
+add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ u8 *addr;
+ struct irdma_dma_mem mem;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_chunk *chunk = info->chunk;
+ enum irdma_status_code status = 0;
+ u32 rel_pd_idx = info->idx.rel_pd_idx;
+ u32 pd_idx = info->idx.pd_idx;
+ u32 i;
+
+ if (irdma_pble_get_paged_mem(chunk, info->pages))
+ return IRDMA_ERR_NO_MEMORY;
+
+ status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
+ IRDMA_SD_TYPE_PAGED,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (status)
+ goto error;
+
+ addr = chunk->vaddr;
+ for (i = 0; i < info->pages; i++) {
+ mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
+ mem.size = 4096;
+ mem.va = addr;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+ if (!pd_entry->valid) {
+ status = irdma_add_pd_table_entry(dev, hmc_info,
+ pd_idx++, &mem);
+ if (status)
+ goto error;
+
+ addr += 4096;
+ }
+ }
+
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ return 0;
+
+error:
+ irdma_pble_free_paged_mem(chunk);
+
+ return status;
+}
+
+/**
+ * irdma_get_type - add a sd entry type for sd
+ * @dev: irdma_sc_dev struct
+ * @idx: index of sd
+ * @pages: pages in the sd
+ */
+static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
+ struct sd_pd_idx *idx, u32 pages)
+{
+ enum irdma_sd_entry_type sd_entry_type;
+
+ sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ return sd_entry_type;
+}
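
The ternary above encodes the whole direct-vs-paged decision: a DIRECT sd (one contiguous backing page) is only usable when the allocation starts on an sd boundary and spans all of its pds; everything else must be PAGED. A small standalone sketch (512 pds per sd assumed, matching IRDMA_HMC_PD_CNT_IN_SD):

    #include <stdint.h>
    #include <stdio.h>

    #define PD_PER_SD 512 /* IRDMA_HMC_PD_CNT_IN_SD (assumed) */

    static const char *sd_type(uint32_t rel_pd_idx, uint32_t pages)
    {
        return (!rel_pd_idx && pages == PD_PER_SD) ? "DIRECT" : "PAGED";
    }

    int main(void)
    {
        printf("%s\n", sd_type(0, 512)); /* aligned and full -> DIRECT */
        printf("%s\n", sd_type(7, 505)); /* starts mid-sd    -> PAGED */
        printf("%s\n", sd_type(0, 128)); /* aligned, partial -> PAGED */
        return 0;
    }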
+
+/**
+ * add_pble_prm - add a sd entry for pble resoure
+ * @pble_rsrc: pble resource management
+ */
+static enum irdma_status_code
+add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_chunk *chunk;
+ struct irdma_add_page_info info;
+ struct sd_pd_idx *idx = &info.idx;
+ enum irdma_status_code ret_code = 0;
+ enum irdma_sd_entry_type sd_entry_type;
+ u64 sd_reg_val = 0;
+ struct irdma_virt_mem chunkmem;
+ u32 pages;
+
+ if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+ return IRDMA_ERR_NO_MEMORY;
+
+ if (pble_rsrc->next_fpm_addr & 0xfff)
+ return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
+
+ chunkmem.size = sizeof(*chunk);
+ chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
+ if (!chunkmem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ chunk = chunkmem.va;
+ chunk->chunkmem = chunkmem;
+ hmc_info = dev->hmc_info;
+ chunk->dev = dev;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ get_sd_pd_idx(pble_rsrc, idx);
+ sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
+ pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
+ IRDMA_HMC_PD_CNT_IN_SD;
+ pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+ info.chunk = chunk;
+ info.hmc_info = hmc_info;
+ info.pages = pages;
+ info.sd_entry = sd_entry;
+ if (!sd_entry->valid)
+ sd_entry_type = irdma_get_type(dev, idx, pages);
+ else
+ sd_entry_type = sd_entry->entry_type;
+
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
+ pages, pble_rsrc->unallocated_pble,
+ pble_rsrc->next_fpm_addr);
+ ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
+ if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
+ ret_code = add_sd_direct(pble_rsrc, &info);
+
+ if (ret_code)
+ sd_entry_type = IRDMA_SD_TYPE_PAGED;
+ else
+ pble_rsrc->stats_direct_sds++;
+
+ if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
+ ret_code = add_bp_pages(pble_rsrc, &info);
+ if (ret_code)
+ goto error;
+ else
+ pble_rsrc->stats_paged_sds++;
+ }
+
+ ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
+ if (ret_code)
+ goto error;
+
+ pble_rsrc->next_fpm_addr += chunk->size;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
+ pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+
+ if (!sd_entry->valid) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
+ idx->sd_idx, sd_entry->entry_type, true);
+ if (ret_code)
+ goto error;
+ }
+
+ sd_entry->valid = true;
+ return 0;
+
+error:
+ if (chunk->bitmapbuf)
+ kfree(chunk->bitmapmem.va);
+ kfree(chunk->chunkmem.va);
+
+ return ret_code;
+}
+
+/**
+ * free_lvl2 - free level 2 pble
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 i;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ if (leaf->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo);
+ else
+ break;
+ }
+
+ if (root->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);
+
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static enum irdma_status_code
+get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 lf4k, lflast, total, i;
+ u32 pblcnt = PBLE_PER_PAGE;
+ u64 *addr;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf;
+ enum irdma_status_code ret_code;
+ u64 fpm_addr;
+
+ /* number of full 512-entry (4K) leaves */
+ lf4k = palloc->total_cnt >> 9;
+ lflast = palloc->total_cnt % PBLE_PER_PAGE;
+ total = (lflast == 0) ? lf4k : lf4k + 1;
+ lvl2->leaf_cnt = total;
+
+ lvl2->leafmem.size = (sizeof(*leaf) * total);
+ lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
+ if (!lvl2->leafmem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ lvl2->leaf = lvl2->leafmem.va;
+ leaf = lvl2->leaf;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
+ total << 3, &root->addr, &fpm_addr);
+ if (ret_code) {
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+ return IRDMA_ERR_NO_MEMORY;
+ }
+
+ root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ root->cnt = total;
+ addr = root->addr;
+ for (i = 0; i < total; i++, leaf++) {
+ pblcnt = (lflast && ((i + 1) == total)) ?
+ lflast : PBLE_PER_PAGE;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo, pblcnt << 3,
+ &leaf->addr, &fpm_addr);
+ if (ret_code)
+ goto error;
+
+ leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+
+ leaf->cnt = pblcnt;
+ *addr = (u64)leaf->idx;
+ addr++;
+ }
+
+ palloc->level = PBLE_LEVEL_2;
+ pble_rsrc->stats_lvl2++;
+ return 0;
+
+error:
+ free_lvl2(pble_rsrc, palloc);
+
+ return IRDMA_ERR_NO_MEMORY;
+}
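
The leaf accounting at the top of get_lvl2_pble() is worth a worked example: a level-2 allocation is carved into full 512-entry (4k) leaves plus one short leaf for any remainder, and the root needs one 8-byte entry per leaf. A standalone sketch with a hypothetical request size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t total_cnt = 1300;         /* hypothetical pbles requested */
        uint32_t lf4k = total_cnt >> 9;    /* full 512-entry leaves: 2 */
        uint32_t lflast = total_cnt % 512; /* entries in short leaf: 276 */
        uint32_t total = lflast ? lf4k + 1 : lf4k;

        printf("leaves=%u, root bytes=%u\n", total, total << 3);
        return 0;
    }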
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static enum irdma_status_code
+get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ enum irdma_status_code ret_code;
+ u64 fpm_addr;
+ struct irdma_pble_info *lvl1 = &palloc->level1;
+
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
+ palloc->total_cnt << 3, &lvl1->addr,
+ &fpm_addr);
+ if (ret_code)
+ return IRDMA_ERR_NO_MEMORY;
+
+ palloc->level = PBLE_LEVEL_1;
+ lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ lvl1->cnt = palloc->total_cnt;
+ pble_rsrc->stats_lvl1++;
+
+ return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls the get_lvl1 and get_lvl2 pble routines
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding the pble (idx + pble addr)
+ * @level1_only: flag for a level 1 PBLE
+ */
+static enum irdma_status_code
+get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, bool level1_only)
+{
+ enum irdma_status_code status = 0;
+
+ status = get_lvl1_pble(pble_rsrc, palloc);
+ if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
+ return status;
+
+ status = get_lvl2_pble(pble_rsrc, palloc);
+
+ return status;
+}
+
+/**
+ * irdma_get_pble - allocate pbles from the prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding the pble (idx + pble addr)
+ * @pble_cnt: number of pbles requested
+ * @level1_only: true if only a level 1 pble is to be acquired
+ */
+enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc,
+ u32 pble_cnt, bool level1_only)
+{
+ enum irdma_status_code status = 0;
+ int max_sds = 0;
+ int i;
+
+ palloc->total_cnt = pble_cnt;
+ palloc->level = PBLE_LEVEL_0;
+
+ mutex_lock(&pble_rsrc->pble_mutex_lock);
+
+ /* check first to see if we can get pbles without acquiring
+ * additional sds
+ */
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
+ if (!status)
+ goto exit;
+
+ max_sds = (palloc->total_cnt >> 18) + 1;
+ for (i = 0; i < max_sds; i++) {
+ status = add_pble_prm(pble_rsrc);
+ if (status)
+ break;
+
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
+ /* if level1_only, only go through it once */
+ if (!status || level1_only)
+ break;
+ }
+
+exit:
+ if (!status) {
+ pble_rsrc->allocdpbles += pble_cnt;
+ pble_rsrc->stats_alloc_ok++;
+ } else {
+ pble_rsrc->stats_alloc_fail++;
+ }
+ mutex_unlock(&pble_rsrc->pble_mutex_lock);
+
+ return status;
+}
+
+/**
+ * irdma_free_pble - put pbles back into prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ pble_rsrc->freedpbles += palloc->total_cnt;
+
+ if (palloc->level == PBLE_LEVEL_2)
+ free_lvl2(pble_rsrc, palloc);
+ else
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &palloc->level1.chunkinfo);
+ pble_rsrc->stats_alloc_freed++;
+}
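
Taken together, irdma_get_pble()/irdma_free_pble() form the allocator's public surface. A hedged usage sketch of a hypothetical caller (error handling trimmed; the real callers live in the MR/QP paths):

    /* Sketch only: allocate 1024 pbles, allowing a level-2 layout, then
     * hand them back. What goes between the two calls would be the
     * programming of palloc.level1/.level2 into a hardware context.
     */
    static int example_use_pbles(struct irdma_hmc_pble_rsrc *pble_rsrc)
    {
        struct irdma_pble_alloc palloc = {};

        if (irdma_get_pble(pble_rsrc, &palloc, 1024, false))
            return -ENOMEM;

        /* ... use palloc.level and palloc.level1 or palloc.level2 ... */

        irdma_free_pble(pble_rsrc, &palloc);
        return 0;
    }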
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
new file mode 100644
index 000000000000..e1b3b8118a2c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2019 Intel Corporation */
+#ifndef IRDMA_PBLE_H
+#define IRDMA_PBLE_H
+
+#define PBLE_SHIFT 6
+#define PBLE_PER_PAGE 512
+#define HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT 9
+#define PBLE_INVALID_IDX 0xffffffff
+
+enum irdma_pble_level {
+ PBLE_LEVEL_0 = 0,
+ PBLE_LEVEL_1 = 1,
+ PBLE_LEVEL_2 = 2,
+};
+
+enum irdma_alloc_type {
+ PBLE_NO_ALLOC = 0,
+ PBLE_SD_CONTIGOUS = 1,
+ PBLE_SD_PAGED = 2,
+};
+
+struct irdma_chunk;
+
+struct irdma_pble_chunkinfo {
+ struct irdma_chunk *pchunk;
+ u64 bit_idx;
+ u64 bits_used;
+};
+
+struct irdma_pble_info {
+ u64 *addr;
+ u32 idx;
+ u32 cnt;
+ struct irdma_pble_chunkinfo chunkinfo;
+};
+
+struct irdma_pble_level2 {
+ struct irdma_pble_info root;
+ struct irdma_pble_info *leaf;
+ struct irdma_virt_mem leafmem;
+ u32 leaf_cnt;
+};
+
+struct irdma_pble_alloc {
+ u32 total_cnt;
+ enum irdma_pble_level level;
+ union {
+ struct irdma_pble_info level1;
+ struct irdma_pble_level2 level2;
+ };
+};
+
+struct sd_pd_idx {
+ u32 sd_idx;
+ u32 pd_idx;
+ u32 rel_pd_idx;
+};
+
+struct irdma_add_page_info {
+ struct irdma_chunk *chunk;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct sd_pd_idx idx;
+ u32 pages;
+};
+
+struct irdma_chunk {
+ struct list_head list;
+ struct irdma_dma_info dmainfo;
+ void *bitmapbuf;
+
+ u32 sizeofbitmap;
+ u64 size;
+ void *vaddr;
+ u64 fpm_addr;
+ u32 pg_cnt;
+ enum irdma_alloc_type type;
+ struct irdma_sc_dev *dev;
+ struct irdma_virt_mem bitmapmem;
+ struct irdma_virt_mem chunkmem;
+};
+
+struct irdma_pble_prm {
+ struct list_head clist;
+ spinlock_t prm_lock; /* protect prm bitmap */
+ u64 total_pble_alloc;
+ u64 free_pble_cnt;
+ u8 pble_shift;
+};
+
+struct irdma_hmc_pble_rsrc {
+ u32 unallocated_pble;
+ struct mutex pble_mutex_lock; /* protect PBLE resource */
+ struct irdma_sc_dev *dev;
+ u64 fpm_base_addr;
+ u64 next_fpm_addr;
+ struct irdma_pble_prm pinfo;
+ u64 allocdpbles;
+ u64 freedpbles;
+ u32 stats_direct_sds;
+ u32 stats_paged_sds;
+ u64 stats_alloc_ok;
+ u64 stats_alloc_fail;
+ u64 stats_alloc_freed;
+ u64 stats_lvl1;
+ u64 stats_lvl2;
+};
+
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+enum irdma_status_code
+irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc);
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc);
+enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc,
+ u32 pble_cnt, bool level1_only);
+enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk);
+enum irdma_status_code
+irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr);
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo);
+void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
+enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
+ u32 pg_cnt);
+void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
+#endif /* IRDMA_PBLE_H */
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
new file mode 100644
index 000000000000..e3f5173706fe
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_PROTOS_H
+#define IRDMA_PROTOS_H
+
+#define PAUSE_TIMER_VAL 0xffff
+#define REFRESH_THRESHOLD 0x7fff
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xff
+#define CQP_COMPL_WAIT_TIME_MS 10
+#define CQP_TIMEOUT_THRESHOLD 500
+
+/* init operations */
+enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
+ struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info);
+void irdma_sc_rt_init(struct irdma_sc_dev *dev);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
+enum irdma_status_code
+irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info, bool post_sq);
+/* HMC/FPM functions */
+enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+/* stats misc */
+enum irdma_status_code
+irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait);
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat);
+void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
+ struct irdma_dev_hw_stats *stats_values,
+ u64 *hw_stats_regs_32, u64 *hw_stats_regs_64,
+ u8 hw_rev);
+enum irdma_status_code
+irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info);
+enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_up_info *map_info);
+enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_ceq *sc_ceq, u8 op);
+enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_aeq *sc_aeq, u8 op);
+enum irdma_status_code
+irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info);
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats);
+/* vsi functions */
+enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info);
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info);
+enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq);
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
+/* misc L2 param change functions */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params);
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
+enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
+ u8 cmd);
+void irdma_qp_add_qos(struct irdma_sc_qp *qp);
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp);
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
+/* terminate functions*/
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
+
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+/* dynamic memory allocation */
+/* misc */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
+enum irdma_status_code
+irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev,
+ u32 qp_count);
+enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev);
+void free_sd_mem(struct irdma_sc_dev *dev);
+enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo);
+enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev);
+enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+enum irdma_status_code
+irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+enum irdma_status_code
+irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+enum irdma_status_code
+irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+#endif /* IRDMA_PROTOS_H */
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
new file mode 100644
index 000000000000..58e7d875643b
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -0,0 +1,1744 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "puda.h"
+#include "ws.h"
+
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf);
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx);
+/**
+ * irdma_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
+{
+ struct irdma_puda_buf *buf = NULL;
+
+ if (!list_empty(list)) {
+ buf = (struct irdma_puda_buf *)list->next;
+ list_del((struct list_head *)&buf->list);
+ }
+
+ return buf;
+}
+
+/**
+ * irdma_puda_get_bufpool - take a buffer from the resource's pool
+ * @rsrc: resource to use for buffer
+ */
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_puda_buf *buf = NULL;
+ struct list_head *list = &rsrc->bufpool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ buf = irdma_puda_get_listbuf(list);
+ if (buf) {
+ rsrc->avail_buf_count--;
+ buf->vsi = rsrc->vsi;
+ } else {
+ rsrc->stats_buf_alloc_fail++;
+ }
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+
+ return buf;
+}
+
+/**
+ * irdma_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ unsigned long flags;
+
+ buf->do_lpb = false;
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ list_add(&buf->list, &rsrc->bufpool);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->avail_buf_count++;
+}
+
+/**
+ * irdma_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
+ struct irdma_puda_buf *buf, bool initial)
+{
+ __le64 *wqe;
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ u64 offset24 = 0;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ if (!initial)
+ get_64bit_val(wqe, 24, &offset24);
+
+ offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 0, buf->mem.pa);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
+ offset24);
+ }
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static enum irdma_status_code
+irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
+{
+ u32 i;
+ u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+ struct irdma_puda_buf *buf = NULL;
+
+ for (i = 0; i < invalid_cnt; i++) {
+ buf = irdma_puda_get_bufpool(rsrc);
+ if (!buf)
+ return IRDMA_ERR_list_empty;
+ irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
+ rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+ rsrc->rxq_invalid_cnt--;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @len: length of buffer
+ */
+static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
+ u32 len)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_virt_mem buf_mem;
+
+ buf_mem.size = sizeof(struct irdma_puda_buf);
+ buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
+ if (!buf_mem.va)
+ return NULL;
+
+ buf = buf_mem.va;
+ buf->mem.size = len;
+ buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
+ if (!buf->mem.va)
+ goto free_virt;
+ buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
+ kfree(buf->mem.va);
+ goto free_virt;
+ }
+
+ buf->buf_mem.va = buf_mem.va;
+ buf->buf_mem.size = buf_mem.size;
+
+ return buf;
+
+free_virt:
+ kfree(buf_mem.va);
+ return NULL;
+}
+
+/**
+ * irdma_puda_dele_buf - free buffer back to the system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
+ DMA_BIDIRECTIONAL);
+ kfree(buf->mem.va);
+ kfree(buf->buf_mem.va);
+}
+
+/**
+ * irdma_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
+ u32 *wqe_idx)
+{
+ __le64 *wqe = NULL;
+ enum irdma_status_code ret_code = 0;
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return wqe;
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+
+ return wqe;
+}
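
The polarity flip when the head returns to index 0 is the core of the valid-bit scheme used throughout these rings: descriptors are never zeroed after use; instead the expected valid bit alternates on every wrap, so leftovers from the previous pass never look new. A toy model (ring size and iteration count are arbitrary):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned ring_size = 4;
        bool polarity = true;
        unsigned head = 0;

        for (int i = 0; i < 10; i++) {
            printf("post idx %u with valid=%d\n", head, polarity);
            head = (head + 1) % ring_size;
            if (!head)               /* wrapped: flip expected valid bit */
                polarity = !polarity;
        }
        return 0;
    }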
+
+/**
+ * irdma_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static enum irdma_status_code
+irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
+{
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ u64 qword0, qword2, qword3, qword6;
+ __le64 *cqe;
+ __le64 *ext_cqe = NULL;
+ u64 qword7 = 0;
+ u64 comp_ctx;
+ bool valid_bit;
+ bool ext_valid = false;
+ u32 major_err, minor_err;
+ u32 peek_head;
+ bool error;
+ u8 polarity;
+
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
+ get_64bit_val(cqe, 24, &qword3);
+ valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (valid_bit != cq_uk->polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+
+ if (ext_valid) {
+ peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
+ ext_cqe = cq_uk->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ if (polarity != cq_uk->polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ }
+
+ print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
+ 32, false);
+ if (ext_valid)
+ print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
+ 16, 8, ext_cqe, 32, false);
+
+ error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ if (error) {
+ ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
+ major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
+ minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
+ info->compl_error = major_err << 16 | minor_err;
+ return IRDMA_ERR_CQ_COMPL_ERROR;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (ext_valid) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ }
+ info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ if (info->smac_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
+ info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
+ info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
+ info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
+ info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
+ info->smac[5] = (u8)(qword6 & 0xFF);
+ }
+ }
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
+ info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
+ info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
+ }
+
+ info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ }
+
+ return 0;
+}
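
The smac handling above unpacks a MAC address from the low 48 bits of qword6, most significant byte first. The shifts are easier to see in standalone form (the qword value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t qword6 = 0x0000aabbccddeeffULL; /* hypothetical ext CQE */
        uint8_t smac[6];

        for (int i = 0; i < 6; i++)
            smac[i] = (uint8_t)(qword6 >> (40 - 8 * i));

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               smac[0], smac[1], smac[2], smac[3], smac[4], smac[5]);
        return 0; /* prints aa:bb:cc:dd:ee:ff */
    }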
+
+/**
+ * irdma_puda_poll_cmpl - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
+ struct irdma_sc_cq *cq,
+ u32 *compl_err)
+{
+ struct irdma_qp_uk *qp;
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ struct irdma_puda_cmpl_info info = {};
+ enum irdma_status_code ret = 0;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_rsrc *rsrc;
+ u8 cq_type = cq->cq_type;
+ unsigned long flags;
+
+ if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
+ rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
+ cq->vsi->ieq;
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
+ return IRDMA_ERR_BAD_PTR;
+ }
+
+ ret = irdma_puda_poll_info(cq, &info);
+ *compl_err = info.compl_error;
+ if (ret == IRDMA_ERR_Q_EMPTY)
+ return ret;
+ if (ret)
+ goto done;
+
+ qp = info.qp;
+ if (!qp || !rsrc) {
+ ret = IRDMA_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (qp->qp_id != rsrc->qp_id) {
+ ret = IRDMA_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->rq_wrid_array[info.wqe_idx];
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ /* Get all the tcpip information in the buf header */
+ ret = irdma_puda_get_tcpip_info(&info, buf);
+ if (ret) {
+ rsrc->stats_rcvd_pkt_err++;
+ if (cq_type == IRDMA_CQ_TYPE_ILQ) {
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
+ info.wqe_idx);
+ } else {
+ irdma_puda_ret_bufpool(rsrc, buf);
+ irdma_puda_replenish_rq(rsrc, false);
+ }
+ goto done;
+ }
+
+ rsrc->stats_pkt_rcvd++;
+ rsrc->compl_rxwqe_idx = info.wqe_idx;
+ ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
+ rsrc->receive(rsrc->vsi, buf);
+ if (cq_type == IRDMA_CQ_TYPE_ILQ)
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
+ else
+ irdma_puda_replenish_rq(rsrc, false);
+
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->sq_wrtrk_array[info.wqe_idx].wrid;
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ rsrc->xmit_complete(rsrc->vsi, buf);
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ rsrc->tx_wqe_avail_cnt++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ if (!list_empty(&rsrc->txpend))
+ irdma_puda_send_buf(rsrc, NULL);
+ }
+
+done:
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ set_64bit_val(cq_uk->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
+
+ return ret;
+}
+
+/**
+ * irdma_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
+ struct irdma_puda_send_info *info)
+{
+ __le64 *wqe;
+ u32 iplen, l4len;
+ u64 hdr[2];
+ u32 wqe_idx;
+ u8 iipt;
+
+ /* number of 32-bit dwords in the header */
+ l4len = info->tcplen >> 2;
+ if (info->ipv4) {
+ iipt = 3;
+ iplen = 5;
+ } else {
+ iipt = 1;
+ iplen = 10;
+ }
+
+ wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+ /* Third line of WQE descriptor */
+ /* maclen is in words */
+
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
+ FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
+ qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
+ } else {
+ hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
+ FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
+
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
+ }
+
+ set_64bit_val(wqe, 16, hdr[0]);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr[1]);
+
+ print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, 32, false);
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
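
The header-length bookkeeping at the top of irdma_puda_send() is all in 32-bit words: a 20-byte IPv4 header becomes iplen = 5, a 40-byte IPv6 header becomes iplen = 10, and the TCP header length in bytes is shifted down by 2. A minimal check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tcplen = 32;           /* TCP header with options, bytes */
        uint32_t l4len = tcplen >> 2;   /* 8 32-bit words */
        int ipv4 = 1;
        uint32_t iplen = ipv4 ? 5 : 10; /* 20 B IPv4 / 40 B IPv6, in words */

        printf("l4len=%u words, iplen=%u words\n", l4len, iplen);
        return 0;
    }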
+
+/**
+ * irdma_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_send_info info;
+ enum irdma_status_code ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ /* if no wqe is available, or this is a new buffer (not from a
+ * completion) and others are already pending, queue the new buffer
+ */
+ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+ list_add_tail(&buf->list, &rsrc->txpend);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->stats_sent_pkt_q++;
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to txpend\n");
+ return;
+ }
+ rsrc->tx_wqe_avail_cnt--;
+ /* if we are coming from a completion and have pending buffers
+ * then get one from the pending list
+ */
+ if (!buf) {
+ buf = irdma_puda_get_listbuf(&rsrc->txpend);
+ if (!buf)
+ goto done;
+ }
+
+ info.scratch = buf;
+ info.paddr = buf->mem.pa;
+ info.len = buf->totallen;
+ info.tcplen = buf->tcphlen;
+ info.ipv4 = buf->ipv4;
+
+ if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ info.ah_id = buf->ah_id;
+ } else {
+ info.maclen = buf->maclen;
+ info.do_lpb = buf->do_lpb;
+ }
+
+ /* Synch buffer for use by CPU */
+ dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ ret = irdma_puda_send(&rsrc->qp, &info);
+ if (ret) {
+ rsrc->tx_wqe_avail_cnt++;
+ rsrc->stats_sent_pkt_q++;
+ list_add(&buf->list, &rsrc->txpend);
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to puda_send\n");
+ } else {
+ rsrc->stats_pkt_sent++;
+ }
+done:
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * irdma_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ __le64 *qp_ctx = qp->hw_host_ctx;
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
+ set_64bit_val(qp_ctx, 56, 0);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ set_64bit_val(qp_ctx, 64, 1);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+
+ print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_puda_qp_wqe - setup wqe for qp create
+ * @dev: Device
+ * @qp: Resource qp
+ */
+static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev,
+ struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ enum irdma_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, 40, false);
+ irdma_sc_cqp_post_sq(cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
+ &compl_info);
+
+ return status;
+}
+
+/**
+ * irdma_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ struct irdma_qp_uk *ukqp = &qp->qp_uk;
+ enum irdma_status_code ret = 0;
+ u32 sq_size, rq_size;
+ struct irdma_dma_mem *mem;
+
+ sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
+ IRDMA_HW_PAGE_SIZE);
+ rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
+ rsrc->qpmem.size, &rsrc->qpmem.pa,
+ GFP_KERNEL);
+ if (!rsrc->qpmem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ mem = &rsrc->qpmem;
+ memset(mem->va, 0, rsrc->qpmem.size);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->pd = &rsrc->sc_pd;
+ qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
+ qp->dev = rsrc->dev;
+ qp->qp_uk.back_qp = rsrc;
+ qp->sq_pa = mem->pa;
+ qp->rq_pa = qp->sq_pa + sq_size;
+ qp->vsi = rsrc->vsi;
+ ukqp->sq_base = mem->va;
+ ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+ ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+ ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
+ qp->shadow_area_pa = qp->rq_pa + rq_size;
+ qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
+ qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+ ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+ ukqp->sq_size = rsrc->sq_size;
+ ukqp->rq_size = rsrc->rq_size;
+
+ IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+ ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+
+ ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
+ if (ret) {
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ return ret;
+ }
+
+ irdma_qp_add_qos(qp);
+ irdma_puda_qp_setctx(rsrc);
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
+ else
+ ret = irdma_puda_qp_wqe(rsrc->dev, qp);
+ if (ret) {
+ irdma_qp_rem_qos(qp);
+ rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ }
+
+ return ret;
+}
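
The single coherent allocation above is carved into four regions: SQ ring, RQ ring, shadow area, then the HW host context. A layout sketch with assumed sizes (IRDMA_QP_WQE_MIN_SIZE taken as 32 bytes and IRDMA_SHADOW_AREA_SIZE as 8 qwords purely for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_MIN_SIZE  32 /* IRDMA_QP_WQE_MIN_SIZE (assumed) */
    #define SHADOW_QWORDS 8  /* IRDMA_SHADOW_AREA_SIZE (assumed) */

    int main(void)
    {
        uint32_t sq_bytes = 64 * WQE_MIN_SIZE; /* hypothetical sq_size=64 */
        uint32_t rq_bytes = 64 * WQE_MIN_SIZE; /* hypothetical rq_size=64 */
        uint64_t base = 0x100000;              /* hypothetical DMA pa */

        uint64_t sq_pa = base;
        uint64_t rq_pa = sq_pa + sq_bytes;
        uint64_t shadow_pa = rq_pa + rq_bytes;
        uint64_t ctx_pa = shadow_pa + (SHADOW_QWORDS << 3);

        printf("sq=0x%llx rq=0x%llx shadow=0x%llx ctx=0x%llx\n",
               (unsigned long long)sq_pa, (unsigned long long)rq_pa,
               (unsigned long long)shadow_pa, (unsigned long long)ctx_pa);
        return 0;
    }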
+
+/**
+ * irdma_puda_cq_wqe - setup wqe for CQ create
+ * @dev: Device
+ * @cq: resource for cq
+ */
+static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev,
+ struct irdma_sc_cq *cq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ enum irdma_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(dev->cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
+ &compl_info);
+ if (!status) {
+ struct irdma_sc_ceq *ceq = dev->ceq[0];
+
+ if (ceq && ceq->reg_cq)
+ status = irdma_sc_add_cq_ctx(ceq, cq);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_puda_cq_create - create cq for resource
+ * @rsrc: resource for which cq to create
+ */
+static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_dev *dev = rsrc->dev;
+ struct irdma_sc_cq *cq = &rsrc->cq;
+ enum irdma_status_code ret = 0;
+ u32 cqsize;
+ struct irdma_dma_mem *mem;
+ struct irdma_cq_init_info info = {};
+ struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+ cq->vsi = rsrc->vsi;
+ cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
+ rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
+ IRDMA_CQ0_ALIGNMENT);
+ rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
+ &rsrc->cqmem.pa, GFP_KERNEL);
+ if (!rsrc->cqmem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ mem = &rsrc->cqmem;
+ info.dev = dev;
+ info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
+ IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
+ info.shadow_read_threshold = rsrc->cq_size >> 2;
+ info.cq_base_pa = mem->pa;
+ info.shadow_area_pa = mem->pa + cqsize;
+ init_info->cq_base = mem->va;
+ init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
+ init_info->cq_size = rsrc->cq_size;
+ init_info->cq_id = rsrc->cq_id;
+ info.ceqe_mask = true;
+ info.ceq_id_valid = true;
+ info.vsi = rsrc->vsi;
+
+ ret = irdma_sc_cq_init(cq, &info);
+ if (ret)
+ goto error;
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_cq_create_cmd(dev, cq);
+ else
+ ret = irdma_puda_cq_wqe(dev, cq);
+error:
+ if (ret) {
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
+{
+ enum irdma_status_code ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+ return;
+ }
+
+ ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy wqe, status = %d\n",
+ ret);
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy failed, status = %d\n",
+ ret);
+ }
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+}
+
+/**
+ * irdma_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
+{
+ enum irdma_status_code ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+ return;
+ }
+
+ ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error ieq qp destroy done\n");
+ }
+}
+
+/**
+ * irdma_puda_dele_rsrc - delete all resources during close
+ * @vsi: VSI structure of device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ struct irdma_puda_rsrc *rsrc;
+ struct irdma_puda_buf *buf = NULL;
+ struct irdma_puda_buf *nextbuf = NULL;
+ struct irdma_virt_mem *vmem;
+ struct irdma_sc_ceq *ceq;
+
+ ceq = vsi->dev->ceq[0];
+ switch (type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ rsrc = vsi->ilq;
+ vmem = &vsi->ilq_mem;
+ vsi->ilq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ rsrc = vsi->ieq;
+ vmem = &vsi->ieq_mem;
+ vsi->ieq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ default:
+ ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
+ type);
+ return;
+ }
+
+ switch (rsrc->cmpl) {
+ case PUDA_HASH_CRC_COMPLETE:
+ irdma_free_hash_desc(rsrc->hash_desc);
+ fallthrough;
+ case PUDA_QP_CREATED:
+ irdma_qp_rem_qos(&rsrc->qp);
+
+ if (!reset)
+ irdma_puda_free_qp(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ fallthrough;
+ case PUDA_CQ_CREATED:
+ if (!reset)
+ irdma_puda_free_cq(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ break;
+ default:
+ ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
+ break;
+ }
+ /* Free all allocated puda buffers for both tx and rx */
+ buf = rsrc->alloclist;
+ while (buf) {
+ nextbuf = buf->next;
+ irdma_puda_dele_buf(dev, buf);
+ buf = nextbuf;
+ rsrc->alloc_buf_count--;
+ }
+
+ kfree(vmem->va);
+}
+
+/**
+ * irdma_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,
+ u32 count)
+{
+ u32 i;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_buf *nextbuf;
+
+ for (i = 0; i < count; i++) {
+ buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+ if (!buf) {
+ rsrc->stats_buf_alloc_fail++;
+ return IRDMA_ERR_NO_MEMORY;
+ }
+ irdma_puda_ret_bufpool(rsrc, buf);
+ rsrc->alloc_buf_count++;
+ if (!rsrc->alloclist) {
+ rsrc->alloclist = buf;
+ } else {
+ nextbuf = rsrc->alloclist;
+ rsrc->alloclist = buf;
+ buf->next = nextbuf;
+ }
+ }
+
+ rsrc->avail_buf_count = rsrc->alloc_buf_count;
+
+ return 0;
+}
+
+/**
+ * irdma_puda_create_rsrc - create resource (ilq or ieq)
+ * @vsi: sc VSI struct
+ * @info: resource information
+ */
+enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ enum irdma_status_code ret = 0;
+ struct irdma_puda_rsrc *rsrc;
+ u32 pudasize;
+ u32 sqwridsize, rqwridsize;
+ struct irdma_virt_mem *vmem;
+
+ info->count = 1;
+ pudasize = sizeof(struct irdma_puda_rsrc);
+ sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
+ rqwridsize = info->rq_size * 8;
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vmem = &vsi->ilq_mem;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vmem = &vsi->ieq_mem;
+ break;
+ default:
+ return IRDMA_NOT_SUPPORTED;
+ }
+ vmem->size = pudasize + sqwridsize + rqwridsize;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ rsrc = vmem->va;
+ spin_lock_init(&rsrc->bufpool_lock);
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vsi->ilq = vmem->va;
+ vsi->ilq_count = info->count;
+ rsrc->receive = info->receive;
+ rsrc->xmit_complete = info->xmit_complete;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vsi->ieq_count = info->count;
+ vsi->ieq = vmem->va;
+ rsrc->receive = irdma_ieq_receive;
+ rsrc->xmit_complete = irdma_ieq_tx_compl;
+ break;
+ default:
+ return IRDMA_NOT_SUPPORTED;
+ }
+
+ rsrc->type = info->type;
+ rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
+ ((u8 *)vmem->va + pudasize);
+ rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+ /* Initialize all ieq lists */
+ INIT_LIST_HEAD(&rsrc->bufpool);
+ INIT_LIST_HEAD(&rsrc->txpend);
+
+ rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+ irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
+ rsrc->qp_id = info->qp_id;
+ rsrc->cq_id = info->cq_id;
+ rsrc->sq_size = info->sq_size;
+ rsrc->rq_size = info->rq_size;
+ rsrc->cq_size = info->rq_size + info->sq_size;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ rsrc->cq_size += info->rq_size;
+ }
+ rsrc->buf_size = info->buf_size;
+ rsrc->dev = dev;
+ rsrc->vsi = vsi;
+ rsrc->stats_idx = info->stats_idx;
+ rsrc->stats_idx_valid = info->stats_idx_valid;
+
+ ret = irdma_puda_cq_create(rsrc);
+ if (!ret) {
+ rsrc->cmpl = PUDA_CQ_CREATED;
+ ret = irdma_puda_qp_create(rsrc);
+ }
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error qp_create type=%d, status=%d\n",
+ rsrc->type, ret);
+ goto error;
+ }
+ rsrc->cmpl = PUDA_QP_CREATED;
+
+ ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
+ goto error;
+ }
+
+ rsrc->rxq_invalid_cnt = info->rq_size;
+ ret = irdma_puda_replenish_rq(rsrc, true);
+ if (ret)
+ goto error;
+
+ if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
+ if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
+ rsrc->check_crc = true;
+ rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
+ ret = 0;
+ }
+ }
+
+ irdma_sc_ccq_arm(&rsrc->cq);
+ return ret;
+
+error:
+ irdma_puda_dele_rsrc(vsi, info->type, false);
+
+ return ret;
+}
+
+/**
+ * irdma_ilq_putback_rcvbuf - put an ilq receive buffer back on the rq
+ * @qp: ilq's qp resource
+ * @buf: puda buffer for rcv q
+ * @wqe_idx: wqe index of completed rcvbuf
+ */
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx)
+{
+ __le64 *wqe;
+ u64 offset8, offset24;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ get_64bit_val(wqe, 24, &offset24);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ get_64bit_val(wqe, 8, &offset8);
+ if (offset24)
+ offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ else
+ offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ set_64bit_val(wqe, 8, offset8);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+ }
+ if (offset24)
+ offset24 = 0;
+ else
+ offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
+ * @pfpdu: pointer to fpdu
+ * @datap: pointer to data in the buffer
+ * @rcv_seq: seqnum of the data buffer
+ */
+static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
+ u32 rcv_seq)
+{
+ u32 marker_seq, end_seq, blk_start;
+ u8 marker_len = pfpdu->marker_len;
+ u16 total_len = 0;
+ u16 fpdu_len;
+
+ blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
+ if (!blk_start) {
+ total_len = marker_len;
+ marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
+ if (marker_len && *(u32 *)datap)
+ return 0;
+ } else {
+ marker_seq = rcv_seq + blk_start;
+ }
+
+ datap += total_len;
+ fpdu_len = ntohs(*(__be16 *)datap);
+ fpdu_len += IRDMA_IEQ_MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffc;
+
+ if (fpdu_len > pfpdu->max_fpdu_data)
+ return 0;
+
+ total_len += fpdu_len;
+ end_seq = rcv_seq + total_len;
+ while ((int)(marker_seq - end_seq) < 0) {
+ total_len += marker_len;
+ end_seq += marker_len;
+ marker_seq += IRDMA_MRK_BLK_SZ;
+ }
+
+ return total_len;
+}
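
The FPDU length math above works as follows: the 2-byte MPA length field counts only the ULPDU, so the framing overhead (2-byte header plus 4-byte CRC, i.e. IRDMA_IEQ_MPA_FRAMING, assumed to be 6 here) is added back, then the result is rounded up to a 4-byte multiple. A worked example:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t hdr[2] = { 0x00, 0x65 }; /* on-wire MPA length = 101 */
        uint16_t raw, fpdu_len;

        memcpy(&raw, hdr, sizeof(raw));  /* avoid unaligned deref */
        fpdu_len = ntohs(raw);
        fpdu_len += 6;                      /* + framing (assumed value) */
        fpdu_len = (fpdu_len + 3) & 0xfffc; /* round up to 4-byte multiple */
        printf("on-wire fpdu length = %u\n", fpdu_len); /* prints 108 */
        return 0;
    }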
+
+/**
+ * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: at offset in tx buf to copy
+ * @len: length of data to copy
+ */
+static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf,
+ u16 buf_offset, u32 txbuf_offset, u32 len)
+{
+ void *mem1 = (u8 *)buf->mem.va + buf_offset;
+ void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+ memcpy(mem2, mem1, len);
+}
+
+/**
+ * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial fpdu
+ * @txbuf: buffer to prepare
+ */
+static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf)
+{
+ txbuf->tcphlen = buf->tcphlen;
+ txbuf->ipv4 = buf->ipv4;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txbuf->hdrlen = txbuf->tcphlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
+ txbuf->hdrlen);
+ } else {
+ txbuf->maclen = buf->maclen;
+ txbuf->hdrlen = buf->hdrlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+ }
+}
+
+/**
+ * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
+{
+ u32 offset;
+
+ if (buf->seqnum < fps) {
+ offset = fps - buf->seqnum;
+ if (offset > buf->datalen)
+ return;
+ buf->data += offset;
+ buf->datalen -= (u16)offset;
+ buf->seqnum = fps;
+ }
+}
+
+/**
+ * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for the fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *txbuf, u16 fpdu_len)
+{
+ struct irdma_puda_buf *buf;
+ u32 nextseqnum;
+ u16 txoffset, bufoffset;
+
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ nextseqnum = buf->seqnum + fpdu_len;
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txoffset = txbuf->hdrlen;
+ txbuf->totallen = txbuf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + txoffset;
+ } else {
+ txoffset = buf->hdrlen;
+ txbuf->totallen = buf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+ }
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
+ do {
+ if (buf->datalen >= fpdu_len) {
+ /* copied full fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ fpdu_len);
+ buf->datalen -= fpdu_len;
+ buf->data += fpdu_len;
+ buf->seqnum = nextseqnum;
+ break;
+ }
+ /* copy partial fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ buf->datalen);
+ txoffset += buf->datalen;
+ fpdu_len -= buf->datalen;
+ irdma_puda_ret_bufpool(ieq, buf);
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ } while (1);
+
+ /* last buffer on the list */
+ if (buf->datalen)
+ list_add(&buf->list, rxlist);
+ else
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: pointer to fpdu
+ * @rxlist: resource list for received ieq buffers
+ * @pbufl: temporary list for buffers for the fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static enum irdma_status_code
+irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,
+ struct list_head *pbufl, struct irdma_puda_buf *buf,
+ u16 fpdu_len)
+{
+ enum irdma_status_code status = 0;
+ struct irdma_puda_buf *nextbuf;
+ u32 nextseqnum;
+ u16 plen = fpdu_len - buf->datalen;
+ bool done = false;
+
+ nextseqnum = buf->seqnum + buf->datalen;
+ do {
+ nextbuf = irdma_puda_get_listbuf(rxlist);
+ if (!nextbuf) {
+ status = IRDMA_ERR_list_empty;
+ break;
+ }
+ list_add_tail(&nextbuf->list, pbufl);
+ if (nextbuf->seqnum != nextseqnum) {
+ pfpdu->bad_seq_num++;
+ status = IRDMA_ERR_SEQ_NUM;
+ break;
+ }
+ if (nextbuf->datalen >= plen) {
+ done = true;
+ } else {
+ plen -= nextbuf->datalen;
+ nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+ }
+
+ } while (!done);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static enum irdma_status_code
+irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
+{
+ enum irdma_status_code status = 0;
+ u8 *crcptr;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ struct list_head pbufl; /* partial buffer list */
+ struct irdma_puda_buf *txbuf = NULL;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ ieq->partials_handled++;
+
+ INIT_LIST_HEAD(&pbufl);
+ list_add(&buf->list, &pbufl);
+
+ status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+ if (status)
+ goto error;
+
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = IRDMA_ERR_NO_TXBUFS;
+ goto error;
+ }
+
+ irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+ irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
+
+ crcptr = txbuf->data + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc) {
+ status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
+ (fpdu_len - 4), mpacrc);
+ if (status) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
+ goto error;
+ }
+ }
+
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ txbuf->mem.va, txbuf->totallen, false);
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+ pfpdu->rcv_nxt = seqnum + fpdu_len;
+ return status;
+
+error:
+ while (!list_empty(&pbufl)) {
+ buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
+ list_move(&buf->list, rxlist);
+ }
+ if (txbuf)
+ irdma_puda_ret_bufpool(ieq, txbuf);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf)
+{
+ u16 fpdu_len = 0;
+ u16 datalen = buf->datalen;
+ u8 *datap = buf->data;
+ u8 *crcptr;
+ u16 ioffset = 0;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ u16 len = 0;
+ u16 full = 0;
+ bool partial = false;
+ struct irdma_puda_buf *txbuf;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ enum irdma_status_code ret = 0;
+
+ ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ while (datalen) {
+ fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
+ if (!fpdu_len) {
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "IEQ: error bad fpdu len\n");
+ list_add(&buf->list, rxlist);
+ return IRDMA_ERR_MPA_CRC;
+ }
+
+ if (datalen < fpdu_len) {
+ partial = true;
+ break;
+ }
+ crcptr = datap + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc)
+ ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
+ fpdu_len - 4, mpacrc);
+ if (ret) {
+ list_add(&buf->list, rxlist);
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "ERR: IRDMA_ERR_MPA_CRC\n");
+ return IRDMA_ERR_MPA_CRC;
+ }
+ full++;
+ pfpdu->fpdu_processed++;
+ ieq->fpdu_processed++;
+ datap += fpdu_len;
+ len += fpdu_len;
+ datalen -= fpdu_len;
+ }
+ if (full) {
+		/* copy full fpdus into the txbuf and send them out */
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ list_add(&buf->list, rxlist);
+ return IRDMA_ERR_NO_TXBUFS;
+ }
+ /* modify txbuf's buffer header */
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ /* copy full fpdu's to new buffer */
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ txbuf->hdrlen, len);
+ txbuf->totallen = txbuf->hdrlen + len;
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ } else {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ buf->hdrlen, len);
+ txbuf->totallen = buf->hdrlen + len;
+ }
+ irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, txbuf->mem.va, txbuf->totallen,
+ false);
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+
+ if (!datalen) {
+ pfpdu->rcv_nxt = buf->seqnum + len;
+ irdma_puda_ret_bufpool(ieq, buf);
+ return 0;
+ }
+ buf->data = datap;
+ buf->seqnum = seqnum + len;
+ buf->datalen = datalen;
+ pfpdu->rcv_nxt = buf->seqnum;
+ }
+ if (partial)
+ return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+ return 0;
+}
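
The scan loop above can be followed with a small stand-alone sketch, assuming (purely for illustration) fixed-length FPDUs where the real driver reads each length from the MPA header via irdma_ieq_get_fpdu_len():

#include <stdio.h>

int main(void)
{
	unsigned short datalen = 100;	/* bytes left in the rx buffer */
	unsigned short fpdu_len = 40;	/* assumed length of each FPDU */
	unsigned short len = 0;		/* bytes consumed by full FPDUs */
	int full = 0;

	while (datalen >= fpdu_len) {	/* mirrors the driver's exit test */
		full++;
		len += fpdu_len;
		datalen -= fpdu_len;
	}
	/* full == 2, len == 80, datalen == 20 -> trailing partial FPDU */
	printf("full=%d len=%u partial=%u\n", full, len, datalen);
	return 0;
}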
+
+/**
+ * irdma_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
+ * @qp: qp for which partial fpdus are queued
+ * @ieq: ieq resource
+ */
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct irdma_puda_buf *buf;
+ enum irdma_status_code status;
+
+ do {
+ if (list_empty(rxlist))
+ break;
+ buf = irdma_puda_get_listbuf(rxlist);
+ if (!buf) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
+ break;
+ }
+ if (buf->seqnum != pfpdu->rcv_nxt) {
+ /* This could be out of order or missing packet */
+ pfpdu->out_of_order++;
+ list_add(&buf->list, rxlist);
+ break;
+ }
+ /* keep processing buffers from the head of the list */
+ status = irdma_ieq_process_buf(ieq, pfpdu, buf);
+ if (status == IRDMA_ERR_MPA_CRC) {
+ pfpdu->mpa_crc_err = true;
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ pfpdu->crc_err++;
+ ieq->crc_err++;
+ }
+ /* create CQP for AE */
+ irdma_ieq_mpa_crc_ae(ieq->dev, qp);
+ }
+ } while (!status);
+}
+
+/**
+ * irdma_ieq_create_ah - create an address handle for IEQ
+ * @qp: qp pointer
+ * @buf: buf received on IEQ used to create AH
+ */
+static enum irdma_status_code irdma_ieq_create_ah(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_ah_info ah_info = {};
+
+ qp->pfpdu.ah_buf = buf;
+ irdma_puda_ieq_get_ah_info(qp, &ah_info);
+ return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
+ IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
+ &qp->pfpdu.ah);
+}
+
+/**
+ * irdma_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving the exception
+ * @buf: receive buffer
+ */
+static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
+ struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
+ u32 rcv_wnd = hw_host_ctx[23];
+ /* first partial seq # in q2 */
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
+ struct list_head *rxlist = &pfpdu->rxlist;
+ unsigned long flags = 0;
+ u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
+
+ print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ buf->mem.va, buf->totallen, false);
+
+ spin_lock_irqsave(&pfpdu->lock, flags);
+ pfpdu->total_ieq_bufs++;
+ if (pfpdu->mpa_crc_err) {
+ pfpdu->crc_err++;
+ goto error;
+ }
+ if (pfpdu->mode && fps != pfpdu->fps) {
+ /* clean up qp as it is new partial sequence */
+ irdma_ieq_cleanup_qp(ieq, qp);
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
+ pfpdu->mode = false;
+ }
+
+ if (!pfpdu->mode) {
+ print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, (u64 *)qp->q2_buf, 128, false);
+ /* First_Partial_Sequence_Number check */
+ pfpdu->rcv_nxt = fps;
+ pfpdu->fps = fps;
+ pfpdu->mode = true;
+ pfpdu->max_fpdu_data = (buf->ipv4) ?
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
+ pfpdu->pmode_count++;
+ ieq->pmode_count++;
+ INIT_LIST_HEAD(rxlist);
+ irdma_ieq_check_first_buf(buf, fps);
+ }
+
+ if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
+ pfpdu->bad_seq_num++;
+ ieq->bad_seq_num++;
+ goto error;
+ }
+
+ if (!list_empty(rxlist)) {
+ if (buf->seqnum != pfpdu->nextseqnum) {
+ irdma_send_ieq_ack(qp);
+			/* throw away out-of-order, duplicates */
+ goto error;
+ }
+ }
+ /* Insert buf before head */
+ list_add_tail(&buf->list, rxlist);
+ pfpdu->nextseqnum = buf->seqnum + buf->datalen;
+ pfpdu->lastrcv_buf = buf;
+ if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
+ irdma_ieq_create_ah(qp, buf);
+ if (!pfpdu->ah)
+ goto error;
+ goto exit;
+ }
+ if (hw_rev == IRDMA_GEN_1)
+ irdma_ieq_process_fpdus(qp, ieq);
+ else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
+ irdma_ieq_process_fpdus(qp, ieq);
+exit:
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+
+ return;
+
+error:
+ irdma_puda_ret_bufpool(ieq, buf);
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+}
+
+/**
+ * irdma_ieq_receive - handle a received exception buffer
+ * @vsi: VSI of device
+ * @buf: exception buffer received
+ */
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_sc_qp *qp = NULL;
+ u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+ qp = irdma_ieq_get_qp(vsi->dev, buf);
+ if (!qp) {
+ ieq->stats_bad_qp_id++;
+ irdma_puda_ret_bufpool(ieq, buf);
+ } else {
+ irdma_ieq_handle_exception(ieq, qp, buf);
+ }
+ /*
+	 * ieq->rx_wqe_idx tells irdma_puda_replenish_rq()
+	 * at which wqe_idx to start replenishing the rq
+ */
+ if (!ieq->rxq_invalid_cnt)
+ ieq->rx_wqe_idx = wqe_idx;
+ ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * irdma_ieq_tx_compl - return tx buffer to the pool after send completes
+ * @vsi: sc VSI struct
+ * @sqwrid: pointer to puda buffer
+ */
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_puda_buf *buf = sqwrid;
+
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_cleanup_qp - release a qp's pending fpdu buffers on destroy
+ * @ieq: ieq resource
+ * @qp: qp being destroyed
+ */
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ if (qp->pfpdu.ah) {
+ irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
+ qp->pfpdu.ah = NULL;
+ qp->pfpdu.ah_buf = NULL;
+ }
+
+ if (!pfpdu->mode)
+ return;
+
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
new file mode 100644
index 000000000000..db3a51170020
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_PUDA_H
+#define IRDMA_PUDA_H
+
+#define IRDMA_IEQ_MPA_FRAMING 6
+#define IRDMA_TCP_OFFSET 40
+#define IRDMA_IPV4_PAD 20
+#define IRDMA_MRK_BLK_SZ 512
+
+enum puda_rsrc_type {
+ IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
+ IRDMA_PUDA_RSRC_TYPE_IEQ,
+ IRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */
+};
+
+enum puda_rsrc_complete {
+ PUDA_CQ_CREATED = 1,
+ PUDA_QP_CREATED,
+ PUDA_TX_COMPLETE,
+ PUDA_RX_COMPLETE,
+ PUDA_HASH_CRC_COMPLETE,
+};
+
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_sc_cq;
+
+struct irdma_puda_cmpl_info {
+ struct irdma_qp_uk *qp;
+ u8 q_type;
+ u8 l3proto;
+ u8 l4proto;
+ u16 vlan;
+ u32 payload_len;
+ u32 compl_error; /* No_err=0, else major and minor err code */
+ u32 qp_id;
+ u32 wqe_idx;
+ bool ipv4:1;
+ bool smac_valid:1;
+ bool vlan_valid:1;
+ u8 smac[ETH_ALEN];
+};
+
+struct irdma_puda_send_info {
+ u64 paddr; /* Physical address */
+ u32 len;
+ u32 ah_id;
+ u8 tcplen;
+ u8 maclen;
+ bool ipv4:1;
+ bool do_lpb:1;
+ void *scratch;
+};
+
+struct irdma_puda_buf {
+ struct list_head list; /* MUST be first entry */
+ struct irdma_dma_mem mem; /* DMA memory for the buffer */
+ struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
+ struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
+ void *scratch;
+ u8 *iph;
+ u8 *tcph;
+ u8 *data;
+ u16 datalen;
+ u16 vlan_id;
+ u8 tcphlen; /* tcp length in bytes */
+ u8 maclen; /* mac length in bytes */
+	u32 totallen; /* maclen + iphlen + tcphlen + datalen */
+ refcount_t refcount;
+ u8 hdrlen;
+ bool ipv4:1;
+ bool vlan_valid:1;
+ bool do_lpb:1; /* Loopback buffer */
+ bool smac_valid:1;
+ u32 seqnum;
+ u32 ah_id;
+ u8 smac[ETH_ALEN];
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_puda_rsrc_info {
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ enum puda_rsrc_type type; /* ILQ or IEQ */
+ u32 count;
+ u32 pd_id;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
+ u16 buf_size;
+ u8 stats_idx;
+ bool stats_idx_valid:1;
+ int abi_ver;
+};
+
+struct irdma_puda_rsrc {
+ struct irdma_sc_cq cq;
+ struct irdma_sc_qp qp;
+ struct irdma_sc_pd sc_pd;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dma_mem cqmem;
+ struct irdma_dma_mem qpmem;
+ struct irdma_virt_mem ilq_mem;
+ enum puda_rsrc_complete cmpl;
+ enum puda_rsrc_type type;
+	u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 cq_size;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 compl_rxwqe_idx;
+ u32 rx_wqe_idx;
+ u32 rxq_invalid_cnt;
+ u32 tx_wqe_avail_cnt;
+ struct shash_desc *hash_desc;
+ struct list_head txpend;
+ struct list_head bufpool; /* free buffers pool list for recv and xmit */
+ u32 alloc_buf_count;
+ u32 avail_buf_count; /* snapshot of currently available buffers */
+ spinlock_t bufpool_lock;
+ struct irdma_puda_buf *alloclist;
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ /* puda stats */
+ u64 stats_buf_alloc_fail;
+ u64 stats_pkt_rcvd;
+ u64 stats_pkt_sent;
+ u64 stats_rcvd_pkt_err;
+ u64 stats_sent_pkt_q;
+ u64 stats_bad_qp_id;
+ /* IEQ stats */
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 pmode_count;
+ u64 partials_handled;
+ u8 stats_idx;
+ bool check_crc:1;
+ bool stats_idx_valid:1;
+};
+
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
+ struct irdma_puda_send_info *info);
+enum irdma_status_code
+irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info);
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset);
+enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
+ struct irdma_sc_cq *cq,
+ u32 *compl_err);
+
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+enum irdma_status_code
+irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr, u32 len, u32 val);
+enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_free_hash_desc(struct shash_desc *desc);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum);
+enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_qp *qp);
+enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_cq *cq);
+enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info);
+enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info,
+ bool wait, enum puda_rsrc_type type,
+ void *cb_param,
+ struct irdma_sc_ah **ah);
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq);
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
+#endif /* IRDMA_PUDA_H */
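
For orientation, a minimal sketch of creating an IEQ resource with the API declared above; the ID values, queue sizes and both callbacks are illustrative assumptions (the driver wires in its own static handlers such as irdma_ieq_receive()):

static void my_ieq_receive(struct irdma_sc_vsi *vsi,
			   struct irdma_puda_buf *buf)
{
	/* hand the exception buffer to the reassembly path */
}

static void my_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
{
	/* return the completed tx buffer to the pool */
}

static enum irdma_status_code setup_ieq(struct irdma_sc_vsi *vsi)
{
	struct irdma_puda_rsrc_info info = {
		.type = IRDMA_PUDA_RSRC_TYPE_IEQ,
		.count = 1,
		.pd_id = 2,		/* assumed PD id */
		.cq_id = 3,		/* assumed CQ id */
		.qp_id = 4,		/* assumed QP id */
		.sq_size = 512,
		.rq_size = 512,
		.tx_buf_cnt = 512,	/* total bufs = rq_size + tx_buf_cnt */
		.buf_size = 2048,	/* must cover max datalen + headers */
		.receive = my_ieq_receive,
		.xmit_complete = my_ieq_tx_compl,
	};

	return irdma_puda_create_rsrc(vsi, &info);
}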
diff --git a/drivers/infiniband/hw/irdma/status.h b/drivers/infiniband/hw/irdma/status.h
new file mode 100644
index 000000000000..22ea3888253a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/status.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_STATUS_H
+#define IRDMA_STATUS_H
+
+/* Error Codes */
+enum irdma_status_code {
+ IRDMA_SUCCESS = 0,
+ IRDMA_ERR_NVM = -1,
+ IRDMA_ERR_NVM_CHECKSUM = -2,
+ IRDMA_ERR_CFG = -4,
+ IRDMA_ERR_PARAM = -5,
+ IRDMA_ERR_DEVICE_NOT_SUPPORTED = -6,
+ IRDMA_ERR_RESET_FAILED = -7,
+ IRDMA_ERR_SWFW_SYNC = -8,
+ IRDMA_ERR_NO_MEMORY = -9,
+ IRDMA_ERR_BAD_PTR = -10,
+ IRDMA_ERR_INVALID_PD_ID = -11,
+ IRDMA_ERR_INVALID_QP_ID = -12,
+ IRDMA_ERR_INVALID_CQ_ID = -13,
+ IRDMA_ERR_INVALID_CEQ_ID = -14,
+ IRDMA_ERR_INVALID_AEQ_ID = -15,
+ IRDMA_ERR_INVALID_SIZE = -16,
+ IRDMA_ERR_INVALID_ARP_INDEX = -17,
+ IRDMA_ERR_INVALID_FPM_FUNC_ID = -18,
+ IRDMA_ERR_QP_INVALID_MSG_SIZE = -19,
+ IRDMA_ERR_QP_TOOMANY_WRS_POSTED = -20,
+ IRDMA_ERR_INVALID_FRAG_COUNT = -21,
+ IRDMA_ERR_Q_EMPTY = -22,
+ IRDMA_ERR_INVALID_ALIGNMENT = -23,
+ IRDMA_ERR_FLUSHED_Q = -24,
+ IRDMA_ERR_INVALID_PUSH_PAGE_INDEX = -25,
+ IRDMA_ERR_INVALID_INLINE_DATA_SIZE = -26,
+ IRDMA_ERR_TIMEOUT = -27,
+ IRDMA_ERR_OPCODE_MISMATCH = -28,
+ IRDMA_ERR_CQP_COMPL_ERROR = -29,
+ IRDMA_ERR_INVALID_VF_ID = -30,
+ IRDMA_ERR_INVALID_HMCFN_ID = -31,
+ IRDMA_ERR_BACKING_PAGE_ERROR = -32,
+ IRDMA_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
+ IRDMA_ERR_INVALID_PBLE_INDEX = -34,
+ IRDMA_ERR_INVALID_SD_INDEX = -35,
+ IRDMA_ERR_INVALID_PAGE_DESC_INDEX = -36,
+ IRDMA_ERR_INVALID_SD_TYPE = -37,
+ IRDMA_ERR_MEMCPY_FAILED = -38,
+ IRDMA_ERR_INVALID_HMC_OBJ_INDEX = -39,
+ IRDMA_ERR_INVALID_HMC_OBJ_COUNT = -40,
+ IRDMA_ERR_BUF_TOO_SHORT = -43,
+ IRDMA_ERR_BAD_IWARP_CQE = -44,
+ IRDMA_ERR_NVM_BLANK_MODE = -45,
+ IRDMA_ERR_NOT_IMPL = -46,
+ IRDMA_ERR_PE_DOORBELL_NOT_ENA = -47,
+ IRDMA_ERR_NOT_READY = -48,
+ IRDMA_NOT_SUPPORTED = -49,
+ IRDMA_ERR_FIRMWARE_API_VER = -50,
+ IRDMA_ERR_RING_FULL = -51,
+ IRDMA_ERR_MPA_CRC = -61,
+ IRDMA_ERR_NO_TXBUFS = -62,
+ IRDMA_ERR_SEQ_NUM = -63,
+ IRDMA_ERR_list_empty = -64,
+ IRDMA_ERR_INVALID_MAC_ADDR = -65,
+ IRDMA_ERR_BAD_STAG = -66,
+ IRDMA_ERR_CQ_COMPL_ERROR = -67,
+ IRDMA_ERR_Q_DESTROYED = -68,
+ IRDMA_ERR_INVALID_FEAT_CNT = -69,
+ IRDMA_ERR_REG_CQ_FULL = -70,
+ IRDMA_ERR_VF_MSG_ERROR = -71,
+ IRDMA_ERR_NO_INTR = -72,
+ IRDMA_ERR_REG_QSET = -73,
+};
+#endif /* IRDMA_STATUS_H */
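
A short sketch of the return-code convention this enum supports: IRDMA_SUCCESS (0) means ok, any nonzero code is an error that callers propagate unchanged; step_one() and step_two() are hypothetical stand-ins:

static enum irdma_status_code step_one(void)
{
	return IRDMA_SUCCESS;
}

static enum irdma_status_code step_two(void)
{
	return IRDMA_ERR_NO_MEMORY;	/* pretend an allocation failed */
}

static enum irdma_status_code do_two_steps(void)
{
	enum irdma_status_code status;

	status = step_one();
	if (status)
		return status;		/* propagate without remapping */

	return step_two();
}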
diff --git a/drivers/infiniband/hw/irdma/trace.c b/drivers/infiniband/hw/irdma/trace.c
new file mode 100644
index 000000000000..b5133f4137e0
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2019 Intel Corporation */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (ipv4) {
+ __be32 myaddr = htonl(*addr);
+
+ trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port));
+ } else {
+ trace_seq_printf(p, "%pI6:%d", addr, htons(port));
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type)
+{
+ switch (iw_type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ return "IwRequest";
+ case IW_CM_EVENT_CONNECT_REPLY:
+ return "IwReply";
+ case IW_CM_EVENT_ESTABLISHED:
+ return "IwEstablished";
+ case IW_CM_EVENT_DISCONNECT:
+ return "IwDisconnect";
+ case IW_CM_EVENT_CLOSE:
+ return "IwClose";
+ }
+
+ return "Unknown";
+}
+
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type)
+{
+ switch (cm_type) {
+ case IRDMA_CM_EVENT_ESTABLISHED:
+ return "CmEstablished";
+ case IRDMA_CM_EVENT_MPA_REQ:
+ return "CmMPA_REQ";
+ case IRDMA_CM_EVENT_MPA_CONNECT:
+ return "CmMPA_CONNECT";
+ case IRDMA_CM_EVENT_MPA_ACCEPT:
+ return "CmMPA_ACCEPT";
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ return "CmMPA_REJECT";
+ case IRDMA_CM_EVENT_MPA_ESTABLISHED:
+ return "CmMPA_ESTABLISHED";
+ case IRDMA_CM_EVENT_CONNECTED:
+ return "CmConnected";
+ case IRDMA_CM_EVENT_RESET:
+ return "CmReset";
+ case IRDMA_CM_EVENT_ABORTED:
+ return "CmAborted";
+ case IRDMA_CM_EVENT_UNKNOWN:
+ return "none";
+ }
+ return "Unknown";
+}
+
+const char *parse_cm_state(enum irdma_cm_node_state state)
+{
+ switch (state) {
+ case IRDMA_CM_STATE_UNKNOWN:
+ return "UNKNOWN";
+ case IRDMA_CM_STATE_INITED:
+ return "INITED";
+ case IRDMA_CM_STATE_LISTENING:
+ return "LISTENING";
+ case IRDMA_CM_STATE_SYN_RCVD:
+ return "SYN_RCVD";
+ case IRDMA_CM_STATE_SYN_SENT:
+ return "SYN_SENT";
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ return "ONE_SIDE_ESTABLISHED";
+ case IRDMA_CM_STATE_ESTABLISHED:
+ return "ESTABLISHED";
+ case IRDMA_CM_STATE_ACCEPTING:
+ return "ACCEPTING";
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ return "MPAREQ_SENT";
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ return "MPAREQ_RCVD";
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ return "MPAREJ_RECVD";
+ case IRDMA_CM_STATE_OFFLOADED:
+ return "OFFLOADED";
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ return "FIN_WAIT1";
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ return "FIN_WAIT2";
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ return "CLOSE_WAIT";
+ case IRDMA_CM_STATE_TIME_WAIT:
+ return "TIME_WAIT";
+ case IRDMA_CM_STATE_LAST_ACK:
+ return "LAST_ACK";
+ case IRDMA_CM_STATE_CLOSING:
+ return "CLOSING";
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ return "LISTENER_DESTROYED";
+ case IRDMA_CM_STATE_CLOSED:
+ return "CLOSED";
+ }
+	return "Bad state";
+}
diff --git a/drivers/infiniband/hw/irdma/trace.h b/drivers/infiniband/hw/irdma/trace.h
new file mode 100644
index 000000000000..702e4efb018d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2019 Intel Corporation */
+#include "trace_cm.h"
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
new file mode 100644
index 000000000000..bcf10ec427d6
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2019 - 2021 Intel Corporation */
+#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_CM_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "main.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4);
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type);
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type);
+const char *parse_cm_state(enum irdma_cm_node_state);
+#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irdma_cm
+
+TRACE_EVENT(irdma_create_listen,
+ TP_PROTO(struct irdma_device *iwdev, struct irdma_cm_info *cm_info),
+ TP_ARGS(iwdev, cm_info),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = cm_info->loc_port;
+ __entry->ipv4 = cm_info->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ cm_info->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p loc: %s",
+ __entry->iwdev,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
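
Each TRACE_EVENT(name, ...) in this header generates a trace_name() function for the driver to call at the matching code path; a minimal sketch, where the enclosing function is hypothetical:

static int my_create_listen(struct irdma_device *iwdev,
			    struct irdma_cm_info *cm_info)
{
	/* fire the tracepoint defined by the event above */
	trace_irdma_create_listen(iwdev, cm_info);
	/* ... create the listener ... */
	return 0;
}

At runtime the event can be toggled through tracefs under events/irdma_cm/.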
+
+TRACE_EVENT(irdma_dec_refcnt_listen,
+ TP_PROTO(struct irdma_cm_listener *listener, void *caller),
+ TP_ARGS(listener, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u32, refcnt)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS loc: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DECLARE_EVENT_CLASS(listener_template,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __field(enum irdma_cm_listener_state,
+ state)
+ __dynamic_array(u32, laddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ __entry->state = listener->listener_state;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p vlan=%d loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(listener_template, irdma_find_listener,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+DEFINE_EVENT(listener_template, irdma_del_multiple_qhash,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+TRACE_EVENT(irdma_negotiate_mpa_v2,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_cm_node *, cm_node)
+ __field(u16, ord_size)
+ __field(u16, ird_size)
+ ),
+ TP_fast_assign(__entry->cm_node = cm_node;
+ __entry->ord_size = cm_node->ord_size;
+ __entry->ird_size = cm_node->ird_size;
+ ),
+ TP_printk("MPVA2 Negotiated cm_node=%p ORD:[%d], IRD:[%d]",
+ __entry->cm_node,
+ __entry->ord_size,
+ __entry->ird_size
+ )
+);
+
+DECLARE_EVENT_CLASS(tos_template,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u8, tos)
+ __field(u8, user_pri)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->tos = tos;
+ __entry->user_pri = user_pri;
+ ),
+ TP_printk("iwdev=%p TOS:[%d] UP:[%d]",
+ __entry->iwdev,
+ __entry->tos,
+ __entry->user_pri
+ )
+);
+
+DEFINE_EVENT(tos_template, irdma_listener_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DEFINE_EVENT(tos_template, irdma_dcb_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DECLARE_EVENT_CLASS(qhash_template,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ether_addr_copy(__get_dynamic_array(mac),
+ dev_addr);
+ ),
+ TP_printk("iwdev=%p vlan=%d MAC=%6phC loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __get_dynamic_array(mac),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener, char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener, char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+TRACE_EVENT(irdma_addr_resolve,
+ TP_PROTO(struct irdma_device *iwdev, char *dev_addr),
+ TP_ARGS(iwdev, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u8, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ ether_addr_copy(__get_dynamic_array(mac), dev_addr);
+ ),
+ TP_printk("iwdev=%p MAC=%6phC", __entry->iwdev,
+ __get_dynamic_array(mac)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event,
+ TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status, void *caller),
+ TP_ARGS(cm_node, cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct iw_cm_id *, cm_id)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->cm_id = cm_id;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS cm_id=%p node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s status=%d loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_id,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_iw_event_type(__entry->type),
+ __entry->status,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event_no_node,
+ TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
+ int status, void *caller),
+ TP_ARGS(cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->cm_id = cm_id;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ ),
+ TP_printk("cm_id=%p caller=%pS event_type=%s status=%d",
+ __entry->cm_id,
+ __entry->caller,
+ parse_iw_event_type(__entry->type),
+ __entry->status
+ )
+);
+
+DECLARE_EVENT_CLASS(cm_node_template,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum irdma_cm_event_type, type)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_cm_event_type(__entry->type),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_template, irdma_create_event,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_accept,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_connect,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_reject,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_find_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_send_reset,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_rem_ref_cm_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_cm_event_handler,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DECLARE_EVENT_CLASS(open_err_template,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, reset)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->state = cm_node->state;
+ __entry->reset = reset;
+ __entry->caller = caller;
+ ),
+ TP_printk("iwdev=%p caller=%pS node%p reset=%d state=%s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->reset,
+ parse_cm_state(__entry->state)
+ )
+);
+
+DEFINE_EVENT(open_err_template, irdma_active_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DEFINE_EVENT(open_err_template, irdma_passive_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DECLARE_EVENT_CLASS(cm_node_ah_template,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct irdma_sc_ah *, ah)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->ah = cm_node->ah;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->state = cm_node->state;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->cm_node,
+ __entry->ah,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_ah_template, irdma_cm_free_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+DEFINE_EVENT(cm_node_ah_template, irdma_create_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+#endif /* __TRACE_CM_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_cm
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
new file mode 100644
index 000000000000..7387b83e826d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -0,0 +1,1541 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_TYPE_H
+#define IRDMA_TYPE_H
+#include "status.h"
+#include "osdep.h"
+#include "irdma.h"
+#include "user.h"
+#include "hmc.h"
+#include "uda.h"
+#include "ws.h"
+#define IRDMA_DEBUG_ERR "ERR"
+#define IRDMA_DEBUG_INIT "INIT"
+#define IRDMA_DEBUG_DEV "DEV"
+#define IRDMA_DEBUG_CM "CM"
+#define IRDMA_DEBUG_VERBS "VERBS"
+#define IRDMA_DEBUG_PUDA "PUDA"
+#define IRDMA_DEBUG_ILQ "ILQ"
+#define IRDMA_DEBUG_IEQ "IEQ"
+#define IRDMA_DEBUG_QP "QP"
+#define IRDMA_DEBUG_CQ "CQ"
+#define IRDMA_DEBUG_MR "MR"
+#define IRDMA_DEBUG_PBLE "PBLE"
+#define IRDMA_DEBUG_WQE "WQE"
+#define IRDMA_DEBUG_AEQ "AEQ"
+#define IRDMA_DEBUG_CQP "CQP"
+#define IRDMA_DEBUG_HMC "HMC"
+#define IRDMA_DEBUG_USER "USER"
+#define IRDMA_DEBUG_VIRT "VIRT"
+#define IRDMA_DEBUG_DCB "DCB"
+#define IRDMA_DEBUG_CQE "CQE"
+#define IRDMA_DEBUG_CLNT "CLNT"
+#define IRDMA_DEBUG_WS "WS"
+#define IRDMA_DEBUG_STATS "STATS"
+
+enum irdma_page_size {
+ IRDMA_PAGE_SIZE_4K = 0,
+ IRDMA_PAGE_SIZE_2M,
+ IRDMA_PAGE_SIZE_1G,
+};
+
+enum irdma_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20,
+};
+
+enum irdma_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2,
+};
+
+enum irdma_term_error_types {
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUF = 1,
+ DDP_UNTAGGED_BUF = 2,
+ DDP_LLP = 3,
+};
+
+enum irdma_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff,
+};
+
+enum irdma_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06,
+};
+
+enum irdma_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+};
+
+enum irdma_hw_stats_index_32b {
+ IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
+ IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
+ IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
+ IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
+ IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
+ IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
+ IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
+ IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
+ IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
+ IRDMA_HW_STAT_INDEX_MAX_32_GEN_1 = 9, /* Must be same value as next entry */
+ IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
+ IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 10,
+ IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 11,
+ IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 12,
+ IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
+};
+
+enum irdma_hw_stats_index_64b {
+ IRDMA_HW_STAT_INDEX_IP4RXOCTS = 0,
+ IRDMA_HW_STAT_INDEX_IP4RXPKTS = 1,
+ IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 2,
+ IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 3,
+ IRDMA_HW_STAT_INDEX_IP4TXOCTS = 4,
+ IRDMA_HW_STAT_INDEX_IP4TXPKTS = 5,
+ IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 6,
+ IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 7,
+ IRDMA_HW_STAT_INDEX_IP6RXOCTS = 8,
+ IRDMA_HW_STAT_INDEX_IP6RXPKTS = 9,
+ IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 10,
+ IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 11,
+ IRDMA_HW_STAT_INDEX_IP6TXOCTS = 12,
+ IRDMA_HW_STAT_INDEX_IP6TXPKTS = 13,
+ IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 14,
+ IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 15,
+ IRDMA_HW_STAT_INDEX_TCPRXSEGS = 16,
+ IRDMA_HW_STAT_INDEX_TCPTXSEG = 17,
+ IRDMA_HW_STAT_INDEX_RDMARXRDS = 18,
+ IRDMA_HW_STAT_INDEX_RDMARXSNDS = 19,
+ IRDMA_HW_STAT_INDEX_RDMARXWRS = 20,
+ IRDMA_HW_STAT_INDEX_RDMATXRDS = 21,
+ IRDMA_HW_STAT_INDEX_RDMATXSNDS = 22,
+ IRDMA_HW_STAT_INDEX_RDMATXWRS = 23,
+ IRDMA_HW_STAT_INDEX_RDMAVBND = 24,
+ IRDMA_HW_STAT_INDEX_RDMAVINV = 25,
+ IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
+ IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 26,
+ IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 27,
+ IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 28,
+ IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 29,
+ IRDMA_HW_STAT_INDEX_UDPRXPKTS = 30,
+ IRDMA_HW_STAT_INDEX_UDPTXPKTS = 31,
+ IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
+ IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
+};
+
+enum irdma_feature_type {
+ IRDMA_FEATURE_FW_INFO = 0,
+ IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QSETS_MAX = 26,
+ IRDMA_MAX_FEATURES, /* Must be last entry */
+};
+
+enum irdma_sched_prio_type {
+ IRDMA_PRIO_WEIGHTED_RR = 1,
+ IRDMA_PRIO_STRICT = 2,
+ IRDMA_PRIO_WEIGHTED_STRICT = 3,
+};
+
+enum irdma_vm_vf_type {
+ IRDMA_VF_TYPE = 0,
+ IRDMA_VM_TYPE,
+ IRDMA_PF_TYPE,
+};
+
+enum irdma_cqp_hmc_profile {
+ IRDMA_HMC_PROFILE_DEFAULT = 1,
+ IRDMA_HMC_PROFILE_FAVOR_VF = 2,
+ IRDMA_HMC_PROFILE_EQUAL = 3,
+};
+
+enum irdma_quad_entry_type {
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_TYPE_UDP_UNICAST,
+ IRDMA_QHASH_TYPE_UDP_MCAST,
+ IRDMA_QHASH_TYPE_ROCE_MCAST,
+ IRDMA_QHASH_TYPE_ROCEV2_HW,
+};
+
+enum irdma_quad_hash_manage_type {
+ IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ IRDMA_QHASH_MANAGE_TYPE_MODIFY,
+};
+
+enum irdma_syn_rst_handling {
+ IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
+ IRDMA_SYN_RST_HANDLING_HW_TCP,
+ IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
+ IRDMA_SYN_RST_HANDLING_FW_TCP,
+};
+
+enum irdma_queue_type {
+ IRDMA_QUEUE_TYPE_SQ_RQ = 0,
+ IRDMA_QUEUE_TYPE_CQP,
+};
+
+struct irdma_sc_dev;
+struct irdma_vsi_pestat;
+
+struct irdma_dcqcn_cc_params {
+ u8 cc_cfg_valid;
+ u8 min_dec_factor;
+ u8 min_rate;
+ u8 dcqcn_f;
+ u16 rai_factor;
+ u16 hai_factor;
+ u16 dcqcn_t;
+ u32 dcqcn_b;
+ u32 rreduce_mperiod;
+};
+
+struct irdma_cqp_init_info {
+ u64 cqp_compl_ctx;
+ u64 host_ctx_pa;
+ u64 sq_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqp_quanta *sq;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u32 sq_size;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 ceqs_per_vf;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
+
+struct irdma_cqp_sq_wqe {
+ __le64 buf[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_sc_aeqe {
+ __le64 buf[IRDMA_AEQE_SIZE];
+};
+
+struct irdma_ceqe {
+ __le64 buf[IRDMA_CEQE_SIZE];
+};
+
+struct irdma_cqp_ctx {
+ __le64 buf[IRDMA_CQP_CTX_SIZE];
+};
+
+struct irdma_cq_shadow_area {
+ __le64 buf[IRDMA_SHADOW_AREA_SIZE];
+};
+
+struct irdma_dev_hw_stats_offsets {
+ u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
+ u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
+};
+
+struct irdma_dev_hw_stats {
+ u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
+ u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
+};
+
+struct irdma_gather_stats {
+ u32 rsvd1;
+ u32 rxvlanerr;
+ u64 ip4rxocts;
+ u64 ip4rxpkts;
+ u32 ip4rxtrunc;
+ u32 ip4rxdiscard;
+ u64 ip4rxfrags;
+ u64 ip4rxmcocts;
+ u64 ip4rxmcpkts;
+ u64 ip6rxocts;
+ u64 ip6rxpkts;
+ u32 ip6rxtrunc;
+ u32 ip6rxdiscard;
+ u64 ip6rxfrags;
+ u64 ip6rxmcocts;
+ u64 ip6rxmcpkts;
+ u64 ip4txocts;
+ u64 ip4txpkts;
+ u64 ip4txfrag;
+ u64 ip4txmcocts;
+ u64 ip4txmcpkts;
+ u64 ip6txocts;
+ u64 ip6txpkts;
+ u64 ip6txfrags;
+ u64 ip6txmcocts;
+ u64 ip6txmcpkts;
+ u32 ip6txnoroute;
+ u32 ip4txnoroute;
+ u64 tcprxsegs;
+ u32 tcprxprotoerr;
+ u32 tcprxopterr;
+ u64 tcptxsegs;
+ u32 rsvd2;
+ u32 tcprtxseg;
+ u64 udprxpkts;
+ u64 udptxpkts;
+ u64 rdmarxwrs;
+ u64 rdmarxrds;
+ u64 rdmarxsnds;
+ u64 rdmatxwrs;
+ u64 rdmatxrds;
+ u64 rdmatxsnds;
+ u64 rdmavbn;
+ u64 rdmavinv;
+ u64 rxnpecnmrkpkts;
+ u32 rxrpcnphandled;
+ u32 rxrpcnpignored;
+ u32 txnpcnpsent;
+ u32 rsvd3[88];
+};
+
+struct irdma_stats_gather_info {
+ bool use_hmc_fcn_index:1;
+ bool use_stats_inst:1;
+ u8 hmc_fcn_index;
+ u8 stats_inst_index;
+ struct irdma_dma_mem stats_buff_mem;
+ void *gather_stats_va;
+ void *last_gather_stats_va;
+};
+
+struct irdma_vsi_pestat {
+ struct irdma_hw *hw;
+ struct irdma_dev_hw_stats hw_stats;
+ struct irdma_stats_gather_info gather_info;
+ struct timer_list stats_timer;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dev_hw_stats last_hw_stats;
+ spinlock_t lock; /* rdma stats lock */
+};
+
+struct irdma_hw {
+ u8 __iomem *hw_addr;
+ u8 __iomem *priv_hw_addr;
+ struct device *device;
+ struct irdma_hmc_info hmc;
+};
+
+struct irdma_pfpdu {
+ struct list_head rxlist;
+ u32 rcv_nxt;
+ u32 fps;
+ u32 max_fpdu_data;
+ u32 nextseqnum;
+ u32 rcv_start_seq;
+ bool mode:1;
+ bool mpa_crc_err:1;
+ u8 marker_len;
+ u64 total_ieq_bufs;
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 no_tx_bufs;
+ u64 tx_err;
+ u64 out_of_order;
+ u64 pmode_count;
+ struct irdma_sc_ah *ah;
+ struct irdma_puda_buf *ah_buf;
+ spinlock_t lock; /* fpdu processing lock */
+ struct irdma_puda_buf *lastrcv_buf;
+};
+
+struct irdma_sc_pd {
+ struct irdma_sc_dev *dev;
+ u32 pd_id;
+ int abi_ver;
+};
+
+struct irdma_cqp_quanta {
+ __le64 elem[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_sc_cqp {
+ u32 size;
+ u64 sq_pa;
+ u64 host_ctx_pa;
+ void *back_cqp;
+ struct irdma_sc_dev *dev;
+ enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+ struct irdma_dma_mem sdbuf;
+ struct irdma_ring sq_ring;
+ struct irdma_cqp_quanta *sq_base;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u32 cqp_id;
+ u32 sq_size;
+ u32 hw_sq_size;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 polarity;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 timeout_count;
+ u8 ceqs_per_vf;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_sc_aeq {
+ u32 size;
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_aeqe *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ struct irdma_ring aeq_ring;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+ u8 polarity;
+ bool virtual_map:1;
+};
+
+struct irdma_sc_ceq {
+ u32 size;
+ u64 ceq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_ceqe *ceqe_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 elem_cnt;
+ struct irdma_ring ceq_ring;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_size;
+ spinlock_t req_cq_lock; /* protect access to reg_cq array */
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+};
+
+struct irdma_sc_cq {
+ struct irdma_cq_uk cq_uk;
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ void *pbl_list;
+ void *back_cq;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u8 cq_type;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ bool ceqe_mask:1;
+ bool virtual_map:1;
+ bool check_overflow:1;
+ bool ceq_id_valid:1;
+ bool tph_en;
+};
+
+struct irdma_sc_qp {
+ struct irdma_qp_uk qp_uk;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 hw_host_ctx_pa;
+ u64 shadow_area_pa;
+ u64 q2_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ __le64 *hw_host_ctx;
+ void *llp_stream_handle;
+ struct irdma_pfpdu pfpdu;
+ u32 ieq_qp;
+ u8 *q2_buf;
+ u64 qp_compl_ctx;
+ u32 push_idx;
+ u16 qs_handle;
+ u16 push_offset;
+ u8 flush_wqes_count;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 qp_state;
+ u8 hw_sq_size;
+ u8 hw_rq_size;
+ u8 src_mac_addr_idx;
+ bool on_qoslist:1;
+ bool ieq_pass_thru:1;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+ bool flush_sq:1;
+ bool flush_rq:1;
+ bool sq_flush_code:1;
+ bool rq_flush_code:1;
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+ u8 term_flags;
+ u8 user_pri;
+ struct list_head list;
+};
+
+struct irdma_stats_inst_info {
+ bool use_hmc_fcn_index;
+ u8 hmc_fn_id;
+ u8 stats_idx;
+};
+
+struct irdma_up_info {
+ u8 map[8];
+ u8 cnp_up_override;
+ u8 hmc_fcn_idx;
+ bool use_vlan:1;
+ bool use_cnp_up_override:1;
+};
+
+#define IRDMA_MAX_WS_NODES 0x3FF
+#define IRDMA_WS_NODE_INVALID 0xFFFF
+
+struct irdma_ws_node_info {
+ u16 id;
+ u16 vsi;
+ u16 parent_id;
+ u16 qs_handle;
+ bool type_leaf:1;
+ bool enable:1;
+ u8 prio_type;
+ u8 tc;
+ u8 weight;
+};
+
+struct irdma_hmc_fpm_misc {
+ u32 max_ceqs;
+ u32 max_sds;
+ u32 xf_block_size;
+ u32 q1_block_size;
+ u32 ht_multiplier;
+ u32 timer_bucket;
+ u32 rrf_block_size;
+ u32 ooiscf_block_size;
+};
+
+#define IRDMA_LEAF_DEFAULT_REL_BW 64
+#define IRDMA_PARENT_DEFAULT_REL_BW 1
+
+struct irdma_qos {
+ struct list_head qplist;
+ struct mutex qos_mutex; /* protect QoS attributes per QoS level */
+ u64 lan_qos_handle;
+ u32 l2_sched_node_id;
+ u16 qs_handle;
+ u8 traffic_class;
+ u8 rel_bw;
+ u8 prio_type;
+ bool valid;
+};
+
+#define IRDMA_INVALID_FCN_ID 0xff
+struct irdma_sc_vsi {
+ u16 vsi_idx;
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ u32 ilq_count;
+ struct irdma_virt_mem ilq_mem;
+ struct irdma_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct irdma_virt_mem ieq_mem;
+ struct irdma_puda_rsrc *ieq;
+ u32 exception_lan_q;
+ u16 mtu;
+ u16 vm_id;
+ u8 fcn_id;
+ enum irdma_vm_vf_type vm_vf_type;
+ bool stats_fcn_id_alloc:1;
+ bool tc_change_pending:1;
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
+ struct irdma_vsi_pestat *pestat;
+ atomic_t qp_suspend_reqs;
+ enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ u8 qos_rel_bw;
+ u8 qos_prio_type;
+};
+
+struct irdma_sc_dev {
+ struct list_head cqp_cmd_head; /* head of the CQP command list */
+ spinlock_t cqp_lock; /* protect CQP list access */
+ bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
+ struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ u8 __iomem *db_addr;
+ u32 __iomem *wqe_alloc_db;
+ u32 __iomem *cq_arm_db;
+ u32 __iomem *aeq_alloc_db;
+ u32 __iomem *cqp_db;
+ u32 __iomem *cq_ack_db;
+ u32 __iomem *ceq_itr_mask_db;
+ u32 __iomem *aeq_itr_mask_db;
+ u32 __iomem *hw_regs[IRDMA_MAX_REGS];
+ u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
+ u64 hw_masks[IRDMA_MAX_MASKS];
+ u64 hw_shifts[IRDMA_MAX_SHIFTS];
+ u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
+ u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
+ u64 feature_info[IRDMA_MAX_FEATURES];
+ u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
+ struct irdma_hw_attrs hw_attrs;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_aeq *aeq;
+ struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
+ struct irdma_sc_cq *ccq;
+ const struct irdma_irq_ops *irq_ops;
+ struct irdma_hmc_fpm_misc hmc_fpm_misc;
+ struct irdma_ws_node *ws_tree_root;
+ struct mutex ws_mutex; /* ws tree mutex */
+ u16 num_vfs;
+ u8 hmc_fn_id;
+ u8 vf_id;
+ bool vchnl_up:1;
+ bool ceq_valid:1;
+ u8 pci_rev;
+ enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_reset)(struct irdma_sc_vsi *vsi);
+};
+
+struct irdma_modify_cq_info {
+ u64 cq_pa;
+ struct irdma_cqe *cq_base;
+ u32 cq_size;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool check_overflow;
+ bool cq_resize:1;
+};
+
+struct irdma_create_qp_info {
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool mac_valid:1;
+ bool force_lpb;
+ u8 next_iwarp_state;
+};
+
+struct irdma_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+ u16 new_mss;
+ u8 next_iwarp_state;
+ u8 curr_iwarp_state;
+ u8 termlen;
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool udp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool reset_tcp_conn:1;
+ bool remove_hash_idx:1;
+ bool dont_send_term:1;
+ bool dont_send_fin:1;
+ bool cached_var_valid:1;
+ bool mss_change:1;
+ bool force_lpb:1;
+ bool mac_valid:1;
+};
+
+struct irdma_ccq_cqe_info {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ u8 op_code;
+ bool error;
+};
+
+struct irdma_dcb_app_info {
+ u8 priority;
+ u8 selector;
+ u16 prot_id;
+};
+
+struct irdma_qos_tc_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+struct irdma_l2params {
+ struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
+ struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
+ u32 num_apps;
+ u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
+ u16 mtu;
+ u8 up2tc[IRDMA_MAX_USER_PRIORITY];
+ u8 num_tc;
+ u8 vsi_rel_bw;
+ u8 vsi_prio_type;
+ bool mtu_changed:1;
+ bool tc_changed:1;
+};
+
+struct irdma_vsi_init_info {
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ struct irdma_l2params *params;
+ u16 exception_lan_q;
+ u16 pf_data_vsi_num;
+ enum irdma_vm_vf_type vm_vf_type;
+ u16 vm_id;
+ enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_vsi_stats_info {
+ struct irdma_vsi_pestat *pestat;
+ u8 fcn_id;
+ bool alloc_fcn_id;
+};
+
+struct irdma_device_init_info {
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ void __iomem *bar0;
+ u8 hmc_fn_id;
+};
+
+struct irdma_ceq_init_info {
+ u64 ceqe_pa;
+ struct irdma_sc_dev *dev;
+ u64 *ceqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ u32 ceq_id;
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_idx;
+};
+
+struct irdma_aeq_init_info {
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ u32 *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+};
+
+struct irdma_ccq_init_info {
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ void *pbl_list;
+ u32 num_elem;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool avoid_mem_cflct:1;
+ bool virtual_map:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_udp_offload_info {
+ bool ipv4:1;
+ bool insert_vlan_tag:1;
+ u8 ttl;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ u32 snd_mss;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 udp_state;
+ u32 psn_nxt;
+ u32 lsn;
+ u32 epsn;
+ u32 psn_max;
+ u32 psn_una;
+ u32 local_ipaddr[4];
+ u32 cwnd;
+ u8 rexmit_thresh;
+ u8 rnr_nak_thresh;
+};
+
+struct irdma_roce_offload_info {
+ u16 p_key;
+ u16 err_rq_idx;
+ u32 qkey;
+ u32 dest_qp;
+ u32 local_qp;
+ u8 roce_tver;
+ u8 ack_credits;
+ u8 err_rq_idx_valid;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool is_qp1:1;
+ bool udprivcq_en:1;
+ bool dcqcn_en:1;
+ bool rcv_no_icrc:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool dctcp_en:1;
+ bool fw_cc_enable:1;
+ bool use_stats_inst:1;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_iwarp_offload_info {
+ u16 rcv_mark_offset;
+ u16 snd_mark_offset;
+ u8 ddp_ver;
+ u8 rdmap_ver;
+ u8 iwarp_mode;
+ u16 err_rq_idx;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool ib_rd_en:1;
+ bool align_hdrs:1;
+ bool rcv_no_mpa_crc:1;
+ bool err_rq_idx_valid:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool use_stats_inst:1;
+ bool ecn_en:1;
+ bool dctcp_en:1;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_tcp_offload_info {
+ bool ipv4:1;
+ bool no_nagle:1;
+ bool insert_vlan_tag:1;
+ bool time_stamp:1;
+ bool drop_ooo_seg:1;
+ bool avoid_stretch_ack:1;
+ bool wscale:1;
+ bool ignore_tcp_opt:1;
+ bool ignore_tcp_uns_opt:1;
+ u8 cwnd_inc_limit;
+ u8 dup_ack_thresh;
+ u8 ttl;
+ u8 src_mac_addr_idx;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ u32 snd_mss;
+ u16 syn_rst_handling;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 tcp_state;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+ u32 time_stamp_recent;
+ u32 time_stamp_age;
+ u32 snd_nxt;
+ u32 snd_wnd;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 snd_max;
+ u32 snd_una;
+ u32 srtt;
+ u32 rtt_var;
+ u32 ss_thresh;
+ u32 cwnd;
+ u32 snd_wl1;
+ u32 snd_wl2;
+ u32 max_snd_window;
+ u8 rexmit_thresh;
+ u32 local_ipaddr[4];
+};
+
+struct irdma_qp_host_ctx_info {
+ u64 qp_compl_ctx;
+ union {
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_udp_offload_info *udp_info;
+ };
+ union {
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_roce_offload_info *roce_info;
+ };
+ u32 send_cq_num;
+ u32 rcv_cq_num;
+ u32 rem_endpoint_idx;
+ u8 stats_idx;
+ bool srq_valid:1;
+ bool tcp_info_valid:1;
+ bool iwarp_info_valid:1;
+ bool stats_idx_valid:1;
+ u8 user_pri;
+};
+
+struct irdma_aeqe_info {
+ u64 compl_ctx;
+ u32 qp_cq_id;
+ u16 ae_id;
+ u16 wqe_idx;
+ u8 tcp_state;
+ u8 iwarp_state;
+ bool qp:1;
+ bool cq:1;
+ bool sq:1;
+ bool rq:1;
+ bool in_rdrsp_wr:1;
+ bool out_rdrsp:1;
+ bool aeqe_overflow:1;
+ u8 q2_data_written;
+ u8 ae_src;
+};
+
+struct irdma_allocate_stag_info {
+ u64 total_len;
+ u64 first_pm_pbl_idx;
+ u32 chunk_size;
+ u32 stag_idx;
+ u32 page_size;
+ u32 pd_id;
+ u16 access_rights;
+ bool remote_access:1;
+ bool use_hmc_fcn_index:1;
+ bool use_pf_rid:1;
+ u8 hmc_fcn_index;
+};
+
+struct irdma_mw_alloc_info {
+ u32 mw_stag_index;
+ u32 page_size;
+ u32 pd_id;
+ bool remote_access:1;
+ bool mw_wide:1;
+ bool mw1_bind_dont_vldt_key:1;
+};
+
+struct irdma_reg_ns_stag_info {
+ u64 reg_addr_pa;
+ u64 va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+};
+
+struct irdma_fast_reg_stag_info {
+ u64 wr_id;
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool local_fence:1;
+ bool read_fence:1;
+ bool signaled:1;
+ bool push_wqe:1;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+ bool defer_flag:1;
+};
+
+struct irdma_dealloc_stag_info {
+ u32 stag_idx;
+ u32 pd_id;
+ bool mr:1;
+ bool dealloc_pbl:1;
+};
+
+struct irdma_register_shared_stag {
+ u64 va;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index new_stag_idx;
+ irdma_stag_index parent_stag_idx;
+ u32 access_rights;
+ u32 pd_id;
+ u32 page_size;
+ irdma_stag_key new_stag_key;
+};
+
+struct irdma_qp_init_info {
+ struct irdma_qp_uk_init_info qp_uk_init_info;
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ __le64 *host_ctx;
+ u8 *q2;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 host_ctx_pa;
+ u64 q2_pa;
+ u64 shadow_area_pa;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+};
+
+struct irdma_cq_init_info {
+ struct irdma_sc_dev *dev;
+ u64 cq_base_pa;
+ u64 shadow_area_pa;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 type;
+ struct irdma_cq_uk_init_info cq_uk_init_info;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_upload_context_info {
+ u64 buf_pa;
+ u32 qp_id;
+ u8 qp_type;
+ bool freeze_qp:1;
+ bool raw_format:1;
+};
+
+struct irdma_local_mac_entry_info {
+ u8 mac_addr[6];
+ u16 entry_idx;
+};
+
+struct irdma_add_arp_cache_entry_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 reach_max;
+ u16 arp_index;
+ bool permanent;
+};
+
+struct irdma_apbvt_info {
+ u16 port;
+ bool add;
+};
+
+struct irdma_qhash_table_info {
+ struct irdma_sc_vsi *vsi;
+ enum irdma_quad_hash_manage_type manage;
+ enum irdma_quad_entry_type entry_type;
+ bool vlan_valid:1;
+ bool ipv4_valid:1;
+ u8 mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 user_pri;
+ u32 qp_num;
+ u32 dest_ip[4];
+ u32 src_ip[4];
+ u16 dest_port;
+ u16 src_port;
+};
+
+struct irdma_cqp_manage_push_page_info {
+ u32 push_idx;
+ u16 qs_handle;
+ u8 free_page;
+ u8 push_page_type;
+};
+
+struct irdma_qp_flush_info {
+ u16 sq_minor_code;
+ u16 sq_major_code;
+ u16 rq_minor_code;
+ u16 rq_major_code;
+ u16 ae_code;
+ u8 ae_src;
+ bool sq:1;
+ bool rq:1;
+ bool userflushcode:1;
+ bool generate_ae:1;
+};
+
+struct irdma_gen_ae_info {
+ u16 ae_code;
+ u8 ae_src;
+};
+
+struct irdma_cqp_timeout {
+ u64 compl_cqp_cmds;
+ u32 count;
+};
+
+struct irdma_irq_ops {
+ void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
+ void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable);
+ void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
+ void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
+};
+
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
+enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq);
+enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
+ bool post_sq);
+enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info);
+enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_init_info *info);
+
+enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
+enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
+
+enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq);
+enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info);
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
+
+enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info);
+enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info);
+enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
+ u32 count);
+
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver);
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
+ struct irdma_sc_dev *dev);
+enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
+ u16 *min_err);
+enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
+enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
+ struct irdma_ccq_cqe_info *cmpl_info);
+enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq);
+enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
+ struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq);
+enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
+ u64 scratch, bool remove_hash_idx,
+ bool ignore_mw_bnd, bool post_sq);
+enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info,
+ u64 scratch, bool post_sq);
+enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
+ struct irdma_qp_init_info *info);
+enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
+ struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq);
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag);
+void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
+ bool post_sq);
+enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
+ struct irdma_cq_init_info *info);
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
+enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
+ u64 scratch, u8 hmc_fn_id,
+ bool post_sq, bool poll_registers);
+
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
+
+struct cqp_info {
+ union {
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_create_qp_info info;
+ u64 scratch;
+ } qp_create;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_modify_qp_info info;
+ u64 scratch;
+ } qp_modify;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ bool remove_hash_idx;
+ bool ignore_mw_bnd;
+ } qp_destroy;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ bool check_overflow;
+ } cq_create;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ struct irdma_modify_cq_info info;
+ u64 scratch;
+ } cq_modify;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ } cq_destroy;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_allocate_stag_info info;
+ u64 scratch;
+ } alloc_stag;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_mw_alloc_info info;
+ u64 scratch;
+ } mw_alloc;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_reg_ns_stag_info info;
+ u64 scratch;
+ } mr_reg_non_shared;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_dealloc_stag_info info;
+ u64 scratch;
+ } dealloc_stag;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_add_arp_cache_entry_info info;
+ u64 scratch;
+ } add_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u16 arp_index;
+ } del_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_local_mac_entry_info info;
+ u64 scratch;
+ } add_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u8 entry_idx;
+ u8 ignore_ref_count;
+ } del_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ } alloc_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_cqp_manage_push_page_info info;
+ u64 scratch;
+ } manage_push_page;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_upload_context_info info;
+ u64 scratch;
+ } qp_upload_context;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_hmc_fcn_info info;
+ u64 scratch;
+ } manage_hmc_pm;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_create;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_destroy;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_create;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_destroy;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_qp_flush_info info;
+ u64 scratch;
+ } qp_flush_wqes;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_gen_ae_info info;
+ u64 scratch;
+ } gen_ae;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } query_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } commit_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_apbvt_info info;
+ u64 scratch;
+ } manage_apbvt_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_qhash_table_info info;
+ u64 scratch;
+ } manage_qhash_table_entry;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_update_sds_info info;
+ u64 scratch;
+ } update_pe_sds;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ } suspend_resume;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_modify;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_inst_info info;
+ u64 scratch;
+ } stats_manage;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_gather_info info;
+ u64 scratch;
+ } stats_gather;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ws_node_info info;
+ u64 scratch;
+ } ws_node;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_up_info info;
+ u64 scratch;
+ } up_map;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_dma_mem query_buff_mem;
+ u64 scratch;
+ } query_rdma;
+ } u;
+};
+
+struct cqp_cmds_info {
+ struct list_head cqp_cmd_entry;
+ u8 cqp_cmd;
+ u8 post_sq;
+ struct cqp_info in;
+};
+
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx);
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
+{
+ u32 wqe_idx;
+
+ return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+#endif /* IRDMA_TYPE_H */
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
new file mode 100644
index 000000000000..f5b1b6150cdc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "uda.h"
+#include "uda_d.h"
+
+/**
+ * irdma_sc_access_ah() - Create, modify or delete AH
+ * @cqp: struct for cqp hw
+ * @info: ah information
+ * @op: Operation
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info,
+ u32 op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 qw1, qw2;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return IRDMA_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
+ qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
+ FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
+
+ qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
+
+ if (!info->ipv4_valid) {
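+		/* IPv6: all four destination and source address words span two quadwords each; the IPv4 branch below carries the address in the ADDR3 field alone */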
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ set_64bit_val(wqe, 16, qw2);
+
+	dma_wmb(); /* need write memory block before writing the WQE header */
+
+ set_64bit_val(
+ wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
+
+ print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_create_mg_ctx() - create a mcg context
+ * @info: multicast group context info
+ */
+static enum irdma_status_code
+irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
+{
+ struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
+ u8 idx = 0; /* index in the array */
+ u8 ctx_idx = 0; /* index in the MG context */
+
+ memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
+
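+	/* pack only the valid group entries contiguously into the DMA-able MG context */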
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ entry_info = &info->mg_ctx_info[idx];
+ if (entry_info->valid_entry) {
+ set_64bit_val((__le64 *)info->dma_mem_mc.va,
+ ctx_idx * sizeof(u64),
+ FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
+ ctx_idx++;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_access_mcast_grp() - Access mcast group based on op
+ * @cqp: Control QP
+ * @info: multicast group context info
+ * @op: operation to perform
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u32 op, u64 scratch)
+{
+ __le64 *wqe;
+ enum irdma_status_code ret_code = 0;
+
+ if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
+ return IRDMA_ERR_PARAM;
+ }
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
+ return IRDMA_ERR_RING_FULL;
+ }
+
+ ret_code = irdma_create_mg_ctx(info);
+ if (ret_code)
+ return ret_code;
+
+ set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
+
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+ }
+
+ dma_wmb(); /* need write memory block before writing the WQE header. */
+
+ set_64bit_val(wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
+
+ print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, info->dma_mem_mc.va,
+ IRDMA_MAX_MGS_PER_CTX * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_compare_mgs - Compares two multicast group structures
+ * @entry1: Multicast group info
+ * @entry2: Multicast group info in context
+ */
+static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
+ struct irdma_mcast_grp_ctx_entry_info *entry2)
+{
+ if (entry1->dest_port == entry2->dest_port &&
+ entry1->qp_id == entry2->qp_id)
+ return true;
+
+ return false;
+}
+
+/**
+ * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ */
+enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+ bool free_entry_found = false;
+ u32 free_entry_idx = 0;
+
+ /* find either an identical or a free entry for a multicast group */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (ctx->mg_ctx_info[idx].valid_entry) {
+ if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
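+				/* group already registered: the entry is shared, so just bump its use count */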
+ ctx->mg_ctx_info[idx].use_cnt++;
+ return 0;
+ }
+ continue;
+ }
+ if (!free_entry_found) {
+ free_entry_found = true;
+ free_entry_idx = idx;
+ }
+ }
+
+ if (free_entry_found) {
+ ctx->mg_ctx_info[free_entry_idx] = *mg;
+ ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
+ ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
+ ctx->no_of_mgs++;
+ return 0;
+ }
+
+ return IRDMA_ERR_NO_MEMORY;
+}
+
+/**
+ * irdma_sc_del_mcast_grp - Delete mcast group
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ *
+ * Finds and removes a specific multicast group from the context;
+ * all parameters must match for the group to be removed.
+ */
+enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+
+ /* find an entry in multicast group context */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (!ctx->mg_ctx_info[idx].valid_entry)
+ continue;
+
+ if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
+ ctx->mg_ctx_info[idx].use_cnt--;
+
+ if (!ctx->mg_ctx_info[idx].use_cnt) {
+ ctx->mg_ctx_info[idx].valid_entry = false;
+ ctx->no_of_mgs--;
+ /* Remove gap if element was not the last */
+ if (idx != ctx->no_of_mgs &&
+ ctx->no_of_mgs > 0) {
+ memcpy(&ctx->mg_ctx_info[idx],
+ &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
+ sizeof(ctx->mg_ctx_info[idx]));
+ ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
+ }
+ }
+
+ return 0;
+ }
+ }
+
+ return IRDMA_ERR_PARAM;
+}
diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h
new file mode 100644
index 000000000000..a4ad0367dc96
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_H
+#define IRDMA_UDA_H
+
+#define IRDMA_UDA_MAX_FSI_MGS 4096
+#define IRDMA_UDA_MAX_PFS 16
+#define IRDMA_UDA_MAX_VFS 128
+
+struct irdma_sc_cqp;
+
+struct irdma_ah_info {
+ struct irdma_sc_vsi *vsi;
+ u32 pd_idx;
+ u32 dst_arpindex;
+ u32 dest_ip_addr[4];
+ u32 src_ip_addr[4];
+ u32 flow_label;
+ u32 ah_idx;
+ u16 vlan_tag;
+ u8 insert_vlan_tag;
+ u8 tc_tos;
+ u8 hop_ttl;
+ u8 mac_addr[ETH_ALEN];
+ bool ah_valid:1;
+ bool ipv4_valid:1;
+ bool do_lpbk:1;
+};
+
+struct irdma_sc_ah {
+ struct irdma_sc_dev *dev;
+ struct irdma_ah_info ah_info;
+};
+
+enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch);
+enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u32 op, u64 scratch);
+
+static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ ah->dev = dev;
+}
+
+static inline enum irdma_status_code irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info,
+ u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
+ scratch);
+}
+
+static inline enum irdma_status_code irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info,
+ u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
+ scratch);
+}
+
+static inline enum irdma_status_code irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
+ scratch);
+}
+
+static inline enum irdma_status_code irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
+ scratch);
+}
+
+static inline enum irdma_status_code irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
+ scratch);
+}
+#endif /* IRDMA_UDA_H */
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
new file mode 100644
index 000000000000..bfc81cac2c51
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_D_H
+#define IRDMA_UDA_D_H
+
+/* L4 packet type */
+#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_L4T_TCP 1
+#define IRDMA_E_UDA_SQ_L4T_SCTP 2
+#define IRDMA_E_UDA_SQ_L4T_UDP 3
+
+/* Inner IP header type */
+#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
+#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
+#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
+#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
+#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
+#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_L4T_LINE 2
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_QPSQ_IIPT_LINE 2
+
+#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
+#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
+
+/* Byte Offset 0 */
+#define IRDMA_UDAQPC_IPV4_M BIT_ULL(3)
+#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
+
+#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
+#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
+
+#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
+#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
+#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
+#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
+#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
+#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
+#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
+#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
+
+#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
+#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
+#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
+#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
+#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
+#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
+#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
+#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
+#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
+#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
+#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
+#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
+#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
+#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
+#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
+#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
+#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
+#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
+
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
+#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
+#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
+#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
+#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
+#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
+#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
+#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
+#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
+#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
+#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
+#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
+#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
+#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
+#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID GENMASK_ULL(60, 60)
+#define IRDMA_UDA_CQPSQ_QHASH_LANFWD GENMASK_ULL(59, 59)
+#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#endif /* IRDMA_UDA_D_H */
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
new file mode 100644
index 000000000000..a6d52c20091c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "defs.h"
+#include "user.h"
+#include "irdma.h"
+
+/**
+ * irdma_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid));
+ }
+}
+
+/**
+ * irdma_set_fragment_gen_1 - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
+ struct irdma_sge *sge, u8 valid)
+{
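+	/* GEN_1 fragments carry no per-fragment valid bit; @valid is accepted only to match the iw_set_fragment prototype */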
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8, 0);
+ }
+}
+
+/**
+ * irdma_nop_1 - insert a NOP wqe
+ * @qp: hw qp ptr
+ */
+static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return IRDMA_ERR_PARAM;
+
+ wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+
+ qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ /* make sure WQE is written before valid bit is set */
+ dma_wmb();
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_clr_wqes - clear next 128 sq entries
+ * @qp: hw qp ptr
+ * @qp_wqe_idx: wqe_idx
+ */
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+{
+ __le64 *wqe;
+ u32 wqe_idx;
+
+ if (!(qp_wqe_idx & 0x7F)) {
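+	/* once per 128 WQEs, fill the next 4KB chunk (128 x 32-byte WQEs) with the inverse-polarity pattern so stale valid bits are never read as posted after a ring wrap */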
+ wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+ wqe = qp->sq_base[wqe_idx].elem;
+ if (wqe_idx)
+ memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+ else
+ memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+ }
+}
+
+/**
+ * irdma_uk_qp_post_wr - ring doorbell
+ * @qp: hw qp ptr
+ */
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ /* valid bit is written and loads completed before reading shadow */
+ mb();
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, 0, &temp);
+
+ hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
+ sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != qp->initial_ring.head) {
+ if (qp->push_dropped) {
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ qp->push_dropped = false;
+ } else if (sw_sq_head != hw_sq_tail) {
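+			/* ring only if the HW tail lies within the span of newly posted WQEs; the two branches cover the unwrapped and wrapped ring cases */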
+ if (sw_sq_head > qp->initial_ring.head) {
+ if (hw_sq_tail >= qp->initial_ring.head &&
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ if (hw_sq_tail >= qp->initial_ring.head ||
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
+
+/**
+ * irdma_qp_ring_push_db - ring qp doorbell
+ * @qp: hw qp ptr
+ * @wqe_idx: wqe index
+ */
+static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
+{
+ set_32bit_val(qp->push_db, 0,
+ FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
+ qp->initial_ring.head = qp->sq_ring.head;
+ qp->push_mode = true;
+ qp->push_dropped = false;
+}
+
+void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
+ u32 wqe_idx, bool post_sq)
+{
+ __le64 *push;
+
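+	/* use the regular doorbell while earlier WQEs are outstanding and push mode is off; otherwise copy the WQE into one of the eight 32-byte push-page slots and ring the push doorbell */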
+ if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
+ IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
+ !qp->push_mode) {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ } else {
+ push = (__le64 *)((uintptr_t)qp->push_wqe +
+ (wqe_idx & 0x7) * 0x20);
+ memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
+ irdma_qp_ring_push_db(qp, wqe_idx);
+ }
+}
+
+/**
+ * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @quanta: size of WR in quanta
+ * @total_size: size of WR in bytes
+ * @info: info on WR
+ */
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info)
+{
+ __le64 *wqe;
+ __le64 *wqe_0 = NULL;
+ u32 nop_wqe_idx;
+ u16 avail_quanta;
+ u16 i;
+
+ avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
+ qp->uk_attrs->max_hw_sq_chunk);
+ if (quanta <= avail_quanta) {
+ /* WR fits in current chunk */
+ if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+ } else {
+ /* Need to pad with NOP */
+ if (quanta + avail_quanta >
+ IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+
+ nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ for (i = 0; i < avail_quanta; i++) {
+ irdma_nop_1(qp);
+ IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
+ }
+ if (qp->push_db && info->push_wqe)
+ irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
+ avail_quanta, nop_wqe_idx, true);
+ }
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+
+ wqe = qp->sq_base[*wqe_idx].elem;
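+	/* GEN_1: when a single-quantum WQE lands on the even half of a 64-byte pair, invalidate the header of the adjacent odd half so it is not parsed as a valid WQE */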
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
+ wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
+ wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
+ }
+ qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+
+ return wqe;
+}
+
+/**
+ * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
+{
+ __le64 *wqe;
+ enum irdma_status_code ret_code;
+
+ if (IRDMA_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
+
+ return wqe;
+}
+
+/**
+ * irdma_uk_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ enum irdma_status_code ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ read_fence |= info->read_fence;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_lo_sges + 1;
+ else
+ frag_cnt = op_info->num_lo_sges;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ op_info->lo_sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
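+	/* fragments after the first are 16 bytes each, packed from the second 32-byte quantum onward */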
+ for (byte_off = 32; i < op_info->num_lo_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
+{
+ struct irdma_rdma_read *op_info;
+ enum irdma_status_code ret_code;
+ u32 i, byte_off, total_size = 0;
+ bool local_fence = false;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u32 wqe_idx;
+ u16 quanta;
+ u64 hdr;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.rdma_read;
+ if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ addl_frag_cnt = op_info->num_lo_sges > 1 ?
+ (op_info->num_lo_sges - 1) : 0;
+ local_fence |= info->local_fence;
+
+ qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
+ qp->swqe_polarity);
+ for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
+ !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE,
+ (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ enum irdma_status_code ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_sges + 1;
+ else
+ frag_cnt = op_info->num_sges;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = 32; i < op_info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
+ * @wqe: wqe for setting fragment
+ * @op_info: info for setting bind wqe values
+ */
+static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+ * @dest: pointer to wqe
+ * @src: pointer to inline data
+ * @len: length of inline data to copy
+ * @polarity: compatibility parameter
+ */
+static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
+ u8 polarity)
+{
+ if (len <= 16) {
+ memcpy(dest, src, len);
+ } else {
+ memcpy(dest, src, 16);
+ src += 16;
+ dest = dest + 32;
+ memcpy(dest, src, len - 16);
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
+{
+ return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
+}
+
+/**
+ * irdma_set_mw_bind_wqe - set mw bind in wqe
+ * @wqe: wqe for setting mw bind
+ * @op_info: info for setting wqe values
+ */
+static void irdma_set_mw_bind_wqe(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data - Copy inline data to wqe
+ * @dest: pointer to wqe
+ * @src: pointer to inline data
+ * @len: length of inline data to copy
+ * @polarity: polarity of wqe valid bit
+ */
+static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+{
+ u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+ u32 copy_size;
+
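+	/* inline data starts at byte 8 of the first quantum; every later 32-byte quantum holds 31 data bytes with its last byte reserved for the inline valid bit */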
+ dest += 8;
+ if (len <= 8) {
+ memcpy(dest, src, len);
+ return;
+ }
+
+ *((u64 *)dest) = *((u64 *)src);
+ len -= 8;
+ src += 8;
+ dest += 24; /* point to additional 32 byte quanta */
+
+ while (len) {
+ copy_size = len < 31 ? len : 31;
+ memcpy(dest, src, copy_size);
+ *(dest + 31) = inline_valid;
+ len -= copy_size;
+ dest += 32;
+ src += copy_size;
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static u16 irdma_inline_data_size_to_quanta(u32 data_size)
+{
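+	/* the first quantum holds up to 8 inline bytes; each extra quantum adds 31 data bytes plus a valid byte, hence the thresholds step by 31 */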
+ if (data_size <= 8)
+ return IRDMA_QP_WQE_MIN_QUANTA;
+ else if (data_size <= 39)
+ return 2;
+ else if (data_size <= 70)
+ return 3;
+ else if (data_size <= 101)
+ return 4;
+ else if (data_size <= 132)
+ return 5;
+ else if (data_size <= 163)
+ return 6;
+ else if (data_size <= 194)
+ return 7;
+ else
+ return 8;
+}
+
+/**
+ * irdma_uk_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code
+irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inline_rdma_write *op_info;
+ u64 hdr = 0;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inline_rdma_write;
+
+ if (op_info->len > qp->max_inline_data)
+ return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->swqe_polarity);
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_inline_send *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inline_send;
+
+ if (op_info->len > qp->max_inline_data)
+ return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code
+irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inv_local_stag *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence = false;
+ struct irdma_sge sge = {};
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ sge.stag = op_info->target_stag;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
+
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
+ post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_mw_bind - bind Memory Window
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_bind_window *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence = false;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.bind_window;
+ local_fence |= info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
+ ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO,
+ (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
+ (op_info->mem_window_type_1 ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
+ post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
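Editorial note: a hypothetical caller filling the bind_window op for the helper above. The field names are from struct irdma_bind_window in user.h (added later in this patch); everything else is illustrative.

	static enum irdma_status_code example_bind_mw(struct irdma_qp_uk *qp,
						      u32 mr_stag, u32 mw_stag,
						      void *va, u64 len)
	{
		struct irdma_post_sq_info info = {};

		info.wr_id = 2;				/* hypothetical */
		info.signaled = true;
		info.op.bind_window.mr_stag = mr_stag;	/* parent MR */
		info.op.bind_window.mw_stag = mw_stag;	/* window to bind */
		info.op.bind_window.va = va;
		info.op.bind_window.bind_len = len;
		info.op.bind_window.addressing_type = IRDMA_ADDR_TYPE_VA_BASED;
		info.op.bind_window.ena_reads = true;	/* remote read access */
		info.op.bind_window.ena_writes = true;	/* remote write access */
		info.op.bind_window.mem_window_type_1 = true;

		return irdma_uk_mw_bind(qp, &info, true);
	}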
+/**
+ * irdma_uk_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
+{
+ u32 total_size = 0, wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < info->num_sges; i++)
+ total_size += info->sg_list[i].len;
+
+ wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ qp->rwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ qp->rwqe_polarity);
+ byte_off += 16;
+ }
+
+	/* if not an odd number, set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->rwqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, 0);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
+
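Editorial note: a hypothetical single-SGE receive posting built on the helper above; the wrapper and wr_id are illustrative, the structs are the ones declared in user.h.

	static enum irdma_status_code example_post_recv(struct irdma_qp_uk *qp,
							u64 addr, u32 len, u32 lkey)
	{
		struct irdma_sge sge = {
			.tag_off = addr,	/* tagged offset of the buffer */
			.len = len,
			.stag = lkey,
		};
		struct irdma_post_rq_info rq_info = {
			.wr_id = 3,		/* hypothetical */
			.sg_list = &sge,
			.num_sges = 1,
		};

		return irdma_uk_post_receive(qp, &rq_info);
	}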
+/**
+ * irdma_uk_cq_resize - reset the cq buffer info
+ * @cq: cq to resize
+ * @cq_base: new cq buffer addr
+ * @cq_size: number of cqes
+ */
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
+{
+ cq->cq_base = cq_base;
+ cq->cq_size = cq_size;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
+ * @cq: cq to resize
+ * @cq_cnt: the count of the resized cq buffers
+ */
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_next;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ sw_cq_sel += cq_cnt;
+
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
+
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+}
+
+/**
+ * irdma_uk_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_seq_num++;
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next_se |= 1;
+ if (cq_notify == IRDMA_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+
+	dma_wmb(); /* make sure shadow area is updated before the doorbell is rung */
+
+ writel(cq->cq_id, cq->cqe_alloc_db);
+}
+
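Editorial note: given the encoding above, IRDMA_CQ_COMPL_EVENT arms the CQ for the next completion of any kind, while IRDMA_CQ_COMPL_SOLICITED (which only sets the SE bit) wakes the consumer only for solicited completions. A hypothetical re-arm after draining:

	irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);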
+/**
+ * irdma_uk_cq_poll_cmpl - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ */
+enum irdma_status_code
+irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+{
+ u64 comp_ctx, qword0, qword2, qword3;
+ __le64 *cqe;
+ struct irdma_qp_uk *qp;
+ struct irdma_ring *pring = NULL;
+ u32 wqe_idx, q_type;
+ enum irdma_status_code ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (polarity != cq->polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+ if (ext_valid) {
+ u64 qword6, qword7;
+ u32 peek_head;
+
+ if (cq->avoid_mem_cflct) {
+ ext_cqe = (__le64 *)((u8 *)cqe + 32);
+ get_64bit_val(ext_cqe, 24, &qword7);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ } else {
+ peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
+ ext_cqe = cq->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ }
+ if (polarity != cq->polarity)
+ return IRDMA_ERR_Q_EMPTY;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
+ info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
+ if (info->imm_valid) {
+ u64 qword4;
+
+ get_64bit_val(ext_cqe, 0, &qword4);
+ info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
+ }
+ info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->ud_smac_valid || info->ud_vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ if (info->ud_vlan_valid)
+ info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ if (info->ud_smac_valid) {
+ info->ud_smac[5] = qword6 & 0xFF;
+ info->ud_smac[4] = (qword6 >> 8) & 0xFF;
+ info->ud_smac[3] = (qword6 >> 16) & 0xFF;
+ info->ud_smac[2] = (qword6 >> 24) & 0xFF;
+ info->ud_smac[1] = (qword6 >> 32) & 0xFF;
+ info->ud_smac[0] = (qword6 >> 40) & 0xFF;
+ }
+ }
+ } else {
+ info->imm_valid = false;
+ info->ud_smac_valid = false;
+ info->ud_vlan_valid = false;
+ }
+
+ q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ if (info->error) {
+ info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
+ info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
+ if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ /* Set the min error to standard flush error code for remaining cqes */
+ if (info->minor_err != FLUSH_GENERAL_ERR) {
+ qword3 &= ~IRDMA_CQ_MINERR;
+ qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
+ set_64bit_val(cqe, 24, qword3);
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+
+ info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = IRDMA_ERR_Q_DESTROYED;
+ goto exit;
+ }
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+
+ if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ u32 array_idx;
+
+ array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
+ info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
+ ret_code = IRDMA_ERR_Q_EMPTY;
+ goto exit;
+ }
+
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ }
+
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (info->imm_valid)
+ info->op_type = IRDMA_OP_TYPE_REC_IMM;
+ else
+ info->op_type = IRDMA_OP_TYPE_REC;
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
+ qp->rq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ }
+ pring = &qp->rq_ring;
+ } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
+ if (qp->first_sq_wq) {
+ if (wqe_idx + 1 >= qp->conn_wqes)
+ qp->first_sq_wq = false;
+
+ if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ memset(info, 0,
+ sizeof(struct irdma_cq_poll_info));
+ return irdma_uk_cq_poll_cmpl(cq, info);
+ }
+ }
+		/* cease posting push mode on push drop */
+ if (info->push_dropped) {
+ qp->push_mode = false;
+ qp->push_dropped = true;
+ }
+ if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ if (!info->comp_status)
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ ret_code = IRDMA_ERR_Q_EMPTY;
+ goto exit;
+ }
+
+ do {
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u8 op_type;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24,
+ &wqe_qword);
+ op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ info->op_type = op_type;
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ tail + qp->sq_wrtrk_array[tail].quanta);
+ if (op_type != IRDMAQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+ qp->sq_flush_complete = true;
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+exit:
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (pring && IRDMA_RING_MORE_WORK(*pring))
+ move_cq_head = false;
+
+ if (move_cq_head) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+
+ if (ext_valid && !cq->avoid_mem_cflct) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+ }
+
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ if (!cq->avoid_mem_cflct && ext_valid)
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ } else {
+ qword3 &= ~IRDMA_CQ_WQEIDX;
+ qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
+ set_64bit_val(cqe, 24, qword3);
+ }
+
+ return ret_code;
+}
+
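Editorial note: a hypothetical polling loop draining a CQ with the function above; IRDMA_ERR_Q_EMPTY is the normal "nothing left" exit, and other errors (e.g. IRDMA_ERR_Q_DESTROYED) still advance the CQ head, so the loop can simply continue past them.

	static void example_drain_cq(struct irdma_cq_uk *cq)
	{
		struct irdma_cq_poll_info info;

		while (true) {
			enum irdma_status_code ret = irdma_uk_cq_poll_cmpl(cq, &info);

			if (ret == IRDMA_ERR_Q_EMPTY)
				break;		/* no more valid CQEs */
			if (ret)
				continue;	/* e.g. IRDMA_ERR_Q_DESTROYED */
			/* consume info.wr_id, info.op_type, info.comp_status, ... */
		}
	}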
+/**
+ * irdma_qp_round_up - return rounded-up qp wq depth
+ * @wqdepth: wq depth in quanta to round up
+ */
+static int irdma_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
+
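Editorial note: this is the standard bit-smearing round-up to the next power of two, e.g.:

	irdma_qp_round_up(1)   -> 1
	irdma_qp_round_up(33)  -> 64
	irdma_qp_round_up(128) -> 128	(powers of two are unchanged)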
+/**
+ * irdma_get_wqe_shift - get shift count for maximum wqe size
+ * @uk_attrs: qp HW attributes
+ * @sge: Maximum Scatter Gather Elements wqe
+ * @inline_data: Maximum inline data size
+ * @shift: Returns the shift needed based on sge
+ *
+ * Shift can be used to left-shift the WQE size based on the number of
+ * SGEs and the inline data size. For 1 SGE or inline data <= 8 bytes,
+ * shift = 0 (WQE size of 32 bytes). For 2 or 3 SGEs with inline data
+ * <= 39 bytes, shift = 1 (WQE size of 64 bytes). For 4-7 SGEs with
+ * inline data <= 101 bytes, shift = 2 (WQE size of 128 bytes).
+ * Otherwise, shift = 3 (WQE size of 256 bytes). GEN_1 hardware uses
+ * different thresholds and caps the shift at 2.
+ */
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift)
+{
+ *shift = 0;
+ if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
+ if (sge > 1 || inline_data > 8) {
+ if (sge < 4 && inline_data <= 39)
+ *shift = 1;
+ else if (sge < 8 && inline_data <= 101)
+ *shift = 2;
+ else
+ *shift = 3;
+ }
+ } else if (sge > 1 || inline_data > 16) {
+ *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+ }
+}
+
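Editorial note: a few worked examples, assuming uk_attrs points at a populated struct irdma_uk_attrs with hw_rev >= IRDMA_GEN_2:

	u8 shift;

	irdma_get_wqe_shift(uk_attrs, 2, 0, &shift);	/* 2 SGEs      -> shift == 1, 64 B WQE  */
	irdma_get_wqe_shift(uk_attrs, 5, 0, &shift);	/* 5 SGEs      -> shift == 2, 128 B WQE */
	irdma_get_wqe_shift(uk_attrs, 1, 64, &shift);	/* 64 B inline -> shift == 2, 128 B WQE */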
+/**
+ * irdma_get_sqdepth - get SQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ */
+enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
+ u32 sq_size, u8 shift, u32 *sqdepth)
+{
+ *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+
+ if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
+ *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
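Editorial note: the shift from irdma_get_wqe_shift() feeds straight into this depth computation. A hypothetical sizing sequence, mirroring the GEN_2 path in irdma_uk_qp_init() below (variable names are illustrative):

	u32 sqdepth;
	u8 shift;

	irdma_get_wqe_shift(uk_attrs, max_sq_frag_cnt + 1, max_inline_data, &shift);
	if (irdma_get_sqdepth(uk_attrs, sq_size, shift, &sqdepth))
		return IRDMA_ERR_INVALID_SIZE;	/* exceeds max_hw_wq_quanta */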
+/**
+ * irdma_get_rqdepth - get RQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ */
+enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
+ u32 rq_size, u8 shift, u32 *rqdepth)
+{
+ *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
+ *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ return IRDMA_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
+ .iw_copy_inline_data = irdma_copy_inline_data,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
+ .iw_set_fragment = irdma_set_fragment,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
+};
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
+ .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
+ .iw_set_fragment = irdma_set_fragment_gen_1,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
+};
+
+/**
+ * irdma_setup_connection_wqes - setup WQEs necessary to complete
+ * connection.
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ */
+static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info)
+{
+ u16 move_cnt = 1;
+
+ if (!info->legacy_mode &&
+ (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
+ move_cnt = 3;
+
+ qp->conn_wqes = move_cnt;
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_qp_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of the WQE depends on the maximum number of fragments
+ * allowed. The size of the WQE times the number of WQEs should be
+ * the amount of memory allocated for the SQ and RQ.
+ */
+enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info)
+{
+ enum irdma_status_code ret_code = 0;
+ u32 sq_ring_size;
+ u8 sqshift, rqshift;
+
+ qp->uk_attrs = info->uk_attrs;
+ if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+ info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
+ info->max_inline_data, &sqshift);
+ if (info->abi_ver > 4)
+ rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ } else {
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
+ info->max_inline_data, &sqshift);
+ }
+ qp->qp_caps = info->qp_caps;
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+
+ qp->rq_wrid_array = info->rq_wrid_array;
+ qp->wqe_alloc_db = info->wqe_alloc_db;
+ qp->qp_id = info->qp_id;
+ qp->sq_size = info->sq_size;
+ qp->push_mode = false;
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << sqshift;
+ IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
+ IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
+ if (info->first_sq_wq) {
+ irdma_setup_connection_wqes(qp, info);
+ qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
+ } else {
+ qp->swqe_polarity = 0;
+ }
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->max_inline_data = info->max_inline_data;
+ qp->rq_wqe_size = rqshift;
+ IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 1 << rqshift;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
+ qp->wqe_ops = iw_wqe_uk_ops_gen_1;
+ else
+ qp->wqe_ops = iw_wqe_uk_ops;
+ return ret_code;
+}
+
+/**
+ * irdma_uk_cq_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
+{
+ cq->cq_base = info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_db = info->cqe_alloc_db;
+ cq->cq_ack_db = info->cq_ack_db;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+
+ return 0;
+}
+
+/**
+ * irdma_uk_clean_cq - clean cq entries
+ * @q: completion context
+ * @cq: cq to clean
+ */
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ if (polarity != temp)
+ break;
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if ((void *)(unsigned long)comp_ctx == q)
+ set_64bit_val(cqe, 8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+}
+
+/**
+ * irdma_nop - post a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: signaled for completion
+ * @post_sq: ring doorbell
+ */
+enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
+ bool signaled, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 wqe_idx;
+ struct irdma_post_sq_info info = {};
+
+ info.push_wqe = false;
+ info.wr_id = wr_id;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, &info);
+ if (!wqe)
+ return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @quanta: quanta for frag_cnt
+ */
+enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *quanta = IRDMA_QP_WQE_MIN_QUANTA;
+ break;
+ case 2:
+ case 3:
+ *quanta = 2;
+ break;
+ case 4:
+ case 5:
+ *quanta = 3;
+ break;
+ case 6:
+ case 7:
+ *quanta = 4;
+ break;
+ case 8:
+ case 9:
+ *quanta = 5;
+ break;
+ case 10:
+ case 11:
+ *quanta = 6;
+ break;
+ case 12:
+ case 13:
+ *quanta = 7;
+ break;
+ case 14:
+ case 15: /* when immediate data is present */
+ *quanta = 8;
+ break;
+ default:
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
+
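Editorial note: the switch above is equivalent to the closed form below for frag_cnt <= 15 (a sketch; it assumes IRDMA_QP_WQE_MIN_QUANTA is 1):

	*quanta = max_t(u16, IRDMA_QP_WQE_MIN_QUANTA, frag_cnt / 2 + 1);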
+/**
+ * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size in bytes given frag_cnt
+ */
+enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ *wqe_size = 256;
+ break;
+ default:
+ return IRDMA_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
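Editorial note: a hypothetical caller sizing both queues with these helpers; anything above 15 SQ fragments or 14 RQ fragments is rejected.

	u16 quanta, rq_wqe_bytes;

	if (irdma_fragcnt_to_quanta_sq(sq_frags, &quanta))
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	if (irdma_fragcnt_to_wqesize_rq(rq_frags, &rq_wqe_bytes))
		return IRDMA_ERR_INVALID_FRAG_COUNT;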
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
new file mode 100644
index 000000000000..ff705f323233
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_USER_H
+#define IRDMA_USER_H
+
+#define irdma_handle void *
+#define irdma_adapter_handle irdma_handle
+#define irdma_qp_handle irdma_handle
+#define irdma_cq_handle irdma_handle
+#define irdma_pd_id irdma_handle
+#define irdma_stag_handle irdma_handle
+#define irdma_stag_index u32
+#define irdma_stag u32
+#define irdma_stag_key u8
+#define irdma_tagged_offset u64
+#define irdma_access_privileges u32
+#define irdma_physical_fragment u64
+#define irdma_address_list u64 *
+#define irdma_sgl struct irdma_sge *
+
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+
+#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
+#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
+#define IRDMA_ACCESS_FLAGS_ALL 0x3f
+
+#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
+#define IRDMA_OP_TYPE_RDMA_READ 0x01
+#define IRDMA_OP_TYPE_SEND 0x03
+#define IRDMA_OP_TYPE_SEND_INV 0x04
+#define IRDMA_OP_TYPE_SEND_SOL 0x05
+#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
+#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
+#define IRDMA_OP_TYPE_BIND_MW 0x08
+#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
+#define IRDMA_OP_TYPE_INV_STAG 0x0a
+#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
+#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_REC 0x3e
+#define IRDMA_OP_TYPE_REC_IMM 0x3f
+
+#define IRDMA_FLUSH_MAJOR_ERR 1
+
+enum irdma_device_caps_const {
+ IRDMA_WQE_SIZE = 4,
+ IRDMA_CQP_WQE_SIZE = 8,
+ IRDMA_CQE_SIZE = 4,
+ IRDMA_EXTENDED_CQE_SIZE = 8,
+ IRDMA_AEQE_SIZE = 2,
+ IRDMA_CEQE_SIZE = 1,
+ IRDMA_CQP_CTX_SIZE = 8,
+ IRDMA_SHADOW_AREA_SIZE = 8,
+ IRDMA_QUERY_FPM_BUF_SIZE = 176,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_GATHER_STATS_BUF_SIZE = 1024,
+ IRDMA_MIN_IW_QP_ID = 0,
+ IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_CEQID = 0,
+ IRDMA_MAX_CEQID = 1023,
+ IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
+ IRDMA_MIN_CQID = 0,
+ IRDMA_MAX_CQID = 524287,
+ IRDMA_MIN_AEQ_ENTRIES = 1,
+ IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MIN_CEQ_ENTRIES = 1,
+ IRDMA_MAX_CEQ_ENTRIES = 262143,
+ IRDMA_MIN_CQ_SIZE = 1,
+ IRDMA_MAX_CQ_SIZE = 1048575,
+ IRDMA_DB_ID_ZERO = 0,
+ IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
+ IRDMA_MAX_SGE_RD = 13,
+ IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_VF_FPM_ID = 47,
+ IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ IRDMA_MAX_INLINE_DATA_SIZE = 101,
+ IRDMA_MAX_WQ_ENTRIES = 32768,
+ IRDMA_Q2_BUF_SIZE = 256,
+ IRDMA_QP_CTX_SIZE = 256,
+ IRDMA_MAX_PDS = 262144,
+};
+
+enum irdma_addressing_type {
+ IRDMA_ADDR_TYPE_ZERO_BASED = 0,
+ IRDMA_ADDR_TYPE_VA_BASED = 1,
+};
+
+enum irdma_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_GENERAL_ERR,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_FATAL_ERR,
+};
+
+enum irdma_cmpl_status {
+ IRDMA_COMPL_STATUS_SUCCESS = 0,
+ IRDMA_COMPL_STATUS_FLUSHED,
+ IRDMA_COMPL_STATUS_INVALID_WQE,
+ IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
+ IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
+ IRDMA_COMPL_STATUS_INVALID_STAG,
+ IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
+ IRDMA_COMPL_STATUS_INVALID_PD_ID,
+ IRDMA_COMPL_STATUS_WRAP_ERROR,
+ IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
+ IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
+ IRDMA_COMPL_STATUS_INVALID_FBO,
+ IRDMA_COMPL_STATUS_INVALID_LEN,
+ IRDMA_COMPL_STATUS_INVALID_ACCESS,
+ IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
+ IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ IRDMA_COMPL_STATUS_INVALID_REGION,
+ IRDMA_COMPL_STATUS_INVALID_WINDOW,
+ IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
+ IRDMA_COMPL_STATUS_UNKNOWN,
+};
+
+enum irdma_cmpl_notify {
+ IRDMA_CQ_COMPL_EVENT = 0,
+ IRDMA_CQ_COMPL_SOLICITED = 1,
+};
+
+enum irdma_qp_caps {
+ IRDMA_WRITE_WITH_IMM = 1,
+ IRDMA_SEND_WITH_IMM = 2,
+ IRDMA_ROCE = 4,
+ IRDMA_PUSH_MODE = 8,
+};
+
+struct irdma_qp_uk;
+struct irdma_cq_uk;
+struct irdma_qp_uk_init_info;
+struct irdma_cq_uk_init_info;
+
+struct irdma_sge {
+ irdma_tagged_offset tag_off;
+ u32 len;
+ irdma_stag stag;
+};
+
+struct irdma_ring {
+ u32 head;
+ u32 tail;
+ u32 size;
+};
+
+struct irdma_cqe {
+ __le64 buf[IRDMA_CQE_SIZE];
+};
+
+struct irdma_extended_cqe {
+ __le64 buf[IRDMA_EXTENDED_CQE_SIZE];
+};
+
+struct irdma_post_send {
+ irdma_sgl sg_list;
+ u32 num_sges;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_inline_send {
+ void *data;
+ u32 len;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_rq_info {
+ u64 wr_id;
+ irdma_sgl sg_list;
+ u32 num_sges;
+};
+
+struct irdma_rdma_write {
+ irdma_sgl lo_sg_list;
+ u32 num_lo_sges;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_inline_rdma_write {
+ void *data;
+ u32 len;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_rdma_read {
+ irdma_sgl lo_sg_list;
+ u32 num_lo_sges;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_bind_window {
+ irdma_stag mr_stag;
+ u64 bind_len;
+ void *va;
+ enum irdma_addressing_type addressing_type;
+ bool ena_reads:1;
+ bool ena_writes:1;
+ irdma_stag mw_stag;
+ bool mem_window_type_1:1;
+};
+
+struct irdma_inv_local_stag {
+ irdma_stag target_stag;
+};
+
+struct irdma_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ u8 l4len;
+ bool signaled:1;
+ bool read_fence:1;
+ bool local_fence:1;
+ bool inline_data:1;
+ bool imm_data_valid:1;
+ bool push_wqe:1;
+ bool report_rtt:1;
+ bool udp_hdr:1;
+ bool defer_flag:1;
+ u32 imm_data;
+ u32 stag_to_inv;
+ union {
+ struct irdma_post_send send;
+ struct irdma_rdma_write rdma_write;
+ struct irdma_rdma_read rdma_read;
+ struct irdma_bind_window bind_window;
+ struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_inline_rdma_write inline_rdma_write;
+ struct irdma_post_inline_send inline_send;
+ } op;
+};
+
+struct irdma_cq_poll_info {
+ u64 wr_id;
+ irdma_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num_rtt;
+ u32 qp_id;
+ u32 ud_src_qpn;
+ u32 imm_data;
+ irdma_stag inv_stag; /* or L_R_Key */
+ enum irdma_cmpl_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u16 ud_vlan;
+ u8 ud_smac[6];
+ u8 op_type;
+ bool stag_invalid_set:1; /* or L_R_Key set */
+ bool push_dropped:1;
+ bool error:1;
+ bool solicited_event:1;
+ bool ipv4:1;
+ bool ud_vlan_valid:1;
+ bool ud_smac_valid:1;
+ bool imm_valid:1;
+};
+
+enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
+ bool signaled, bool post_sq);
+enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info);
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
+enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq);
+enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+
+struct irdma_wqe_uk_ops {
+ void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
+ u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ u8 valid);
+ void (*iw_set_mw_bind_wqe)(__le64 *wqe,
+ struct irdma_bind_window *op_info);
+};
+
+enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info);
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify);
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size);
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt);
+enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
+enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info);
+struct irdma_sq_uk_wr_trk_info {
+ u64 wrid;
+ u32 wr_len;
+ u16 quanta;
+ u8 reserved[2];
+};
+
+struct irdma_qp_quanta {
+ __le64 elem[IRDMA_WQE_SIZE];
+};
+
+struct irdma_qp_uk {
+ struct irdma_qp_quanta *sq_base;
+ struct irdma_qp_quanta *rq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ __le64 *shadow_area;
+ __le32 *push_db;
+ __le64 *push_wqe;
+ struct irdma_ring sq_ring;
+ struct irdma_ring rq_ring;
+ struct irdma_ring initial_ring;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u16 conn_wqes;
+ u8 qp_type;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ bool deferred_flag:1;
+ bool push_mode:1; /* whether the last post wqe was pushed */
+ bool push_dropped:1;
+ bool first_sq_wq:1;
+ bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
+ bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
+ bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ void *back_qp;
+ spinlock_t *lock;
+ u8 dbg_rq_flushed;
+ u8 sq_flush_seen;
+ u8 rq_flush_seen;
+};
+
+struct irdma_cq_uk {
+ struct irdma_cqe *cq_base;
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ __le64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct irdma_ring cq_ring;
+ u8 polarity;
+ bool avoid_mem_cflct:1;
+};
+
+struct irdma_qp_uk_init_info {
+ struct irdma_qp_quanta *sq;
+ struct irdma_qp_quanta *rq;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ __le64 *shadow_area;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ u8 first_sq_wq;
+ u8 type;
+ int abi_ver;
+ bool legacy_mode;
+};
+
+struct irdma_cq_uk_init_info {
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info);
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
+enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
+ bool signaled, bool post_sq);
+enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
+enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift);
+enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
+ u32 sq_size, u8 shift, u32 *wqdepth);
+enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
+ u32 rq_size, u8 shift, u32 *wqdepth);
+void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
+ u32 wqe_idx, bool post_sq);
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
new file mode 100644
index 000000000000..5bbe44e54f9a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -0,0 +1,2541 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_arp_table - manage arp table
+ * @rf: RDMA PCI function
+ * @ip_addr: ip address for device
+ * @ipv4: IPv4 flag
+ * @mac_addr: mac address ptr
+ * @action: modify, delete or add
+ */
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ u8 *mac_addr, u32 action)
+{
+ unsigned long flags;
+ int arp_index;
+ u32 ip[4] = {};
+
+ if (ipv4)
+ ip[0] = *ip_addr;
+ else
+ memcpy(ip, ip_addr, sizeof(ip));
+
+ spin_lock_irqsave(&rf->arp_lock, flags);
+ for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
+ break;
+ }
+
+ switch (action) {
+ case IRDMA_ARP_ADD:
+ if (arp_index != rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ arp_index = 0;
+ if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
+ (u32 *)&arp_index, &rf->next_arp_index)) {
+ arp_index = -1;
+ break;
+ }
+
+ memcpy(rf->arp_table[arp_index].ip_addr, ip,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
+ break;
+ case IRDMA_ARP_RESOLVE:
+ if (arp_index == rf->arp_table_size)
+ arp_index = -1;
+ break;
+ case IRDMA_ARP_DELETE:
+ if (arp_index == rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ memset(rf->arp_table[arp_index].ip_addr, 0,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ eth_zero_addr(rf->arp_table[arp_index].mac_addr);
+ irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ break;
+ default:
+ arp_index = -1;
+ break;
+ }
+
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ return arp_index;
+}
+
+/**
+ * irdma_add_arp - add a new arp entry if needed
+ * @rf: RDMA function
+ * @ip: IP address
+ * @ipv4: IPv4 flag
+ * @mac: MAC address
+ */
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac)
+{
+ int arpidx;
+
+ arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
+ if (arpidx >= 0) {
+ if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
+ return arpidx;
+
+ irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
+ ipv4, IRDMA_ARP_DELETE);
+ }
+
+ irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
+
+ return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @val: value to write to register
+ */
+inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
+ * rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u32 rd32(struct irdma_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+/**
+ * rd64 - read a 64 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u64 rd64(struct irdma_hw *hw, u32 reg)
+{
+ return readq(hw->hw_addr + reg);
+}
+
+static void irdma_gid_change_event(struct ib_device *ibdev)
+{
+ struct ib_event ib_event;
+
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = ibdev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+}
+
+/**
+ * irdma_inetaddr_event - system notifier for ipv4 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr;
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ local_ipaddr = ntohl(ifa->ifa_address);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev,
+ event, &local_ipaddr, netdev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
+ &local_ipaddr, true, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr);
+ irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_inet6addr_event - system notifier for ipv6 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *netdev = ifa->idev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr6[4];
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev,
+ event, local_ipaddr6, netdev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
+ local_ipaddr6, false, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, local_ipaddr6, false,
+ netdev->dev_addr);
+ irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_net_event - system notifier for net events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ __be32 *p;
+ u32 local_ipaddr[4] = {};
+ bool ipv4 = true;
+
+ ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev,
+ RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ p = (__be32 *)neigh->primary_key;
+ if (neigh->tbl->family == AF_INET6) {
+ ipv4 = false;
+ irdma_copy_ip_ntohl(local_ipaddr, p);
+ } else {
+ local_ipaddr[0] = ntohl(*p);
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
+ iwdev->netdev, neigh->nud_state, local_ipaddr,
+ neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
+
+ else
+ irdma_manage_arp_cache(iwdev->rf, neigh->ha,
+ local_ipaddr, ipv4,
+ IRDMA_ARP_DELETE);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ iwdev->iw_status = 1;
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ fallthrough;
+ case NETDEV_UP:
+ irdma_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u32 local_ipaddr6[4];
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
+ ip_dev == iwdev->netdev) &&
+ (READ_ONCE(ip_dev->flags) & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
+ if_list) {
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+
+ irdma_copy_ip_ntohl(local_ipaddr6,
+ ifp->addr.in6_u.u6_addr32);
+ irdma_manage_arp_cache(iwdev->rf,
+ ip_dev->dev_addr,
+ local_ipaddr6, false,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ u32 ip_addr;
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, dev) {
+ if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
+ dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
+ const struct in_ifaddr *ifa;
+
+ idev = __in_dev_get_rcu(dev);
+ if (!idev)
+ continue;
+
+ in_dev_for_each_ifa_rcu(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+
+ ip_addr = ntohl(ifa->ifa_address);
+ irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
+ &ip_addr, true,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ip - add ip addresses
+ * @iwdev: irdma device
+ *
+ * Add ipv4/ipv6 addresses to the arp cache
+ */
+void irdma_add_ip(struct irdma_device *iwdev)
+{
+ irdma_add_ipv4_addr(iwdev);
+ irdma_add_ipv6_addr(iwdev);
+}
+
+/**
+ * irdma_alloc_and_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: cqp to be used in wait mode
+ */
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait)
+{
+ struct irdma_cqp_request *cqp_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ if (!list_empty(&cqp->cqp_avail_reqs)) {
+ cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+ struct irdma_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ }
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ if (!cqp_request) {
+ cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+ if (cqp_request) {
+ cqp_request->dynamic = true;
+ if (wait)
+ init_waitqueue_head(&cqp_request->waitq);
+ }
+ }
+ if (!cqp_request) {
+ ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
+ return NULL;
+ }
+
+ cqp_request->waiting = wait;
+ refcount_set(&cqp_request->refcnt, 1);
+ memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+
+ return cqp_request;
+}
+
+/**
+ * irdma_get_cqp_request - increase refcount for cqp_request
+ * @cqp_request: pointer to cqp_request instance
+ */
+static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
+{
+ refcount_inc(&cqp_request->refcnt);
+}
+
+/**
+ * irdma_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ cqp_request->request_done = false;
+ cqp_request->callback_fcn = NULL;
+ cqp_request->waiting = false;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
+ wake_up(&cqp->remove_wq);
+}
+
+/**
+ * irdma_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (refcount_dec_and_test(&cqp_request->refcnt))
+ irdma_free_cqp_request(cqp, cqp_request);
+}
+
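Editorial note: taken together, the refcounting contract is: irdma_alloc_and_get_cqp_request() returns the request with a refcount of 1, irdma_handle_cqp_op() takes its own reference while the command is in flight, and the caller drops its reference with irdma_put_cqp_request() once done, as irdma_cqp_sds_cmd() below does. A condensed sketch (error handling elided):

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);	/* refcnt = 1 */
	if (!cqp_request)
		return IRDMA_ERR_NO_MEMORY;
	/* ... fill cqp_request->info ... */
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);	/* drop the caller's reference */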
+/**
+ * irdma_free_pending_cqp_request - free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void
+irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ }
+ wait_event_timeout(cqp->remove_wq,
+ refcount_read(&cqp_request->refcnt) == 1, 1000);
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_cleanup_pending_cqp_op - clean-up cqp with no
+ * completions
+ * @rf: RDMA PCI function
+ */
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request = NULL;
+ struct cqp_cmds_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
+ wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct irdma_cqp_request *)(unsigned long)
+ cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = irdma_remove_cqp_head(dev);
+ cqp_request =
+ container_of(pcmdinfo, struct irdma_cqp_request, info);
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
+/**
+ * irdma_wait_event - wait for completion
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to wait
+ */
+static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cqp_timeout cqp_timeout = {};
+ bool cqp_error = false;
+ enum irdma_status_code err_code = 0;
+
+ cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ if (wait_event_timeout(cqp_request->waitq,
+ cqp_request->request_done,
+ msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
+ break;
+
+ irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
+
+ if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ continue;
+
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ return IRDMA_ERR_TIMEOUT;
+ } while (1);
+
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ err_code = IRDMA_ERR_CQP_COMPL_ERROR;
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
+ cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ }
+ }
+
+ return err_code;
+}
+
+static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
+ [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
+ [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
+ [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
+ [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
+	[IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
+ [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
+ [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
+ [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
+ [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
+ [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
+ [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
+ [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
+ [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
+ [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
+ [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
+ [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
+ [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
+ [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
+ [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
+ [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
+ [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
+ [IRDMA_OP_RESUME] = "Resume QP Cmd",
+ [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
+ [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
+ [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
+ [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
+ [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
+ [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
+ [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
+ [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
+ [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
+ [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
+ [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
+ [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
+ [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
+ [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
+ [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
+ [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
+ [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
+ [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
+ [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
+ [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+};
+
+static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8006, "Flush No Wqe Pending"},
+ {0xffff, 0x8007, "Modify QP Bad Close"},
+ {0xffff, 0x8009, "LLP Closed"},
+ {0xffff, 0x800a, "Reset Not Sent"}
+};
+
+/**
+ * irdma_cqp_crit_err - check if CQP error is critical
+ * @dev: pointer to dev structure
+ * @cqp_cmd: code for last CQP operation
+ * @maj_err_code: major error code
+ * @min_err_code: minor error code
+ */
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
+ if (maj_err_code == irdma_noncrit_err_list[i].maj &&
+ min_err_code == irdma_noncrit_err_list[i].min) {
+ ibdev_dbg(to_ibdev(dev),
+ "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
+ irdma_noncrit_err_list[i].desc,
+ irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
+ min_err_code);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * irdma_handle_cqp_op - process cqp command
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to process
+ */
+enum irdma_status_code irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct cqp_cmds_info *info = &cqp_request->info;
+ enum irdma_status_code status;
+ bool put_cqp_request = true;
+
+ if (rf->reset)
+ return IRDMA_ERR_NOT_READY;
+
+ irdma_get_cqp_request(cqp_request);
+ status = irdma_process_cqp_cmd(dev, info);
+ if (status)
+ goto err;
+
+ if (cqp_request->waiting) {
+ put_cqp_request = false;
+ status = irdma_wait_event(rf, cqp_request);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (irdma_cqp_crit_err(dev, info->cqp_cmd,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev,
+ "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
+ irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
+ cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+
+ if (put_cqp_request)
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+void irdma_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
+
+ refcount_inc(&iwqp->refcnt);
+}
+
+void irdma_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ u32 qp_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!refcount_dec_and_test(&iwqp->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ return;
+ }
+
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->rf->qp_table[qp_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ complete(&iwqp->free_qp);
+}
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
+{
+ return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
+}
+
+/**
+ * irdma_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
+{
+ struct irdma_device *iwdev = to_iwdev(device);
+
+ if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
+ return NULL;
+
+ return &iwdev->rf->qp_table[qpn]->ibqp;
+}
+
+/**
+ * irdma_get_hw_addr - return hw addr
+ * @par: points to shared dev
+ */
+u8 __iomem *irdma_get_hw_addr(void *par)
+{
+ struct irdma_sc_dev *dev = par;
+
+ return dev->hw->hw_addr;
+}
+
+/**
+ * irdma_remove_cqp_head - return head entry and remove
+ * @dev: device
+ */
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
+{
+ struct list_head *entry;
+ struct list_head *list = &dev->cqp_cmd_head;
+
+ if (list_empty(list))
+ return NULL;
+
+ entry = list->next;
+ list_del(entry);
+
+ return entry;
+}
+
+/**
+ * irdma_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ */
+enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *sdinfo)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+ sizeof(cqp_info->in.u.update_pe_sds.info));
+ cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.update_pe_sds.dev = dev;
+ cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
+ * @qp: hardware control qp
+ * @op: suspend or resume
+ */
+enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
+ u8 op)
+{
+ struct irdma_sc_dev *dev = qp->dev;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.suspend_resume.cqp = cqp;
+ cqp_info->in.u.suspend_resume.qp = qp;
+ cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
+ irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
+}
+
+/**
+ * irdma_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
+{
+ struct irdma_qp *iwqp;
+ u8 hte = 0;
+ bool first_time;
+ unsigned long flags;
+
+ iwqp = qp->qp_uk.back_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->hte_added) {
+ iwqp->hte_added = 0;
+ hte = 1;
+ }
+ first_time = !(qp->term_flags & IRDMA_TERM_DONE);
+ qp->term_flags |= IRDMA_TERM_DONE;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (first_time) {
+ if (!timeout_occurred)
+ irdma_terminate_del_timer(qp);
+
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+}
+
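+/**
+ * irdma_terminate_timeout - handle expiry of the terminate timer
+ * @t: timer list entry embedded in the irdma qp
+ */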
+static void irdma_terminate_timeout(struct timer_list *t)
+{
+ struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ irdma_terminate_done(qp, 1);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
+ iwqp->terminate_timer.expires = jiffies + HZ;
+
+ add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * irdma_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+ int ret;
+
+ iwqp = qp->qp_uk.back_qp;
+ ret = del_timer(&iwqp->terminate_timer);
+ if (ret)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
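+
+/*
+ * The terminate timer and the QP reference are coupled:
+ * irdma_terminate_start_timer() takes a reference that is dropped either by
+ * irdma_terminate_timeout() when the timer fires or, if the timer is
+ * cancelled first, by irdma_terminate_del_timer(); the del_timer() return
+ * value gates the drop so the reference is released exactly once.
+ */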
+
+/**
+ * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
+ * @dev: function device struct
+ * @val_mem: buffer for fpm
+ * @hmc_fn_id: function id for fpm
+ */
+enum irdma_status_code
+irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
+ cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
+ cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
+ cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
+ * @dev: hardware control device structure
+ * @val_mem: buffer with fpm values
+ * @hmc_fn_id: function id for fpm
+ */
+enum irdma_status_code
+irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
+ cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
+ cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
+ cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_cq_create_cmd - create a cq via a cqp command
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_create_cmd - create a qp via a cqp command
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_dealloc_push_page - free a push page for qp
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ */
+static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 1;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_free_qp_rsrc - free up memory resources for qp
+ * @iwqp: qp ptr (user or kernel)
+ */
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 qp_num = iwqp->ibqp.qp_num;
+
+ irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ irdma_dealloc_push_page(rf, &iwqp->sc_qp);
+ if (iwqp->sc_qp.vsi) {
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
+ iwqp->sc_qp.user_pri);
+ }
+
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
+ iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
+ iwqp->q2_ctx_mem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
+ iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
+ iwqp->kqp.dma_mem.va = NULL;
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ kfree(iwqp->kqp.rq_wrid_mem);
+ iwqp->kqp.rq_wrid_mem = NULL;
+ kfree(iwqp);
+}
+
+/**
+ * irdma_cq_wq_destroy - send cq destroy cqp
+ * @rf: RDMA PCI function
+ * @cq: hardware control cq
+ */
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_hw_modify_qp_callback - completion handler for modify QP requests that don't wait
+ * @cqp_request: modify QP completion
+ */
+static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp;
+
+ cqp_info = &cqp_request->info;
+ iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
+ atomic_dec(&iwqp->hw_mod_qp_pend);
+ wake_up(&iwqp->mod_qp_waitq);
+}
+
+/**
+ * irdma_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: RDMA device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info,
+ bool wait)
+{
+ enum irdma_status_code status;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_qp_info *m_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ if (!wait) {
+ cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
+ atomic_inc(&iwqp->hw_mod_qp_pend);
+ }
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status) {
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ return status;
+
+ switch (m_info->next_iwarp_state) {
+ struct irdma_gen_ae_info ae_info;
+
+ case IRDMA_QP_STATE_RTS:
+ case IRDMA_QP_STATE_IDLE:
+ case IRDMA_QP_STATE_TERMINATE:
+ case IRDMA_QP_STATE_CLOSING:
+ if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
+ irdma_send_reset(iwqp->cm_node);
+ else
+ iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
+ if (!wait) {
+ ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
+ ae_info.ae_src = 0;
+ irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
+ } else {
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
+ wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ m_info->reset_tcp_conn = true;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ }
+ break;
+ case IRDMA_QP_STATE_ERROR:
+ default:
+ break;
+ }
+ }
+
+ return status;
+}
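+
+/*
+ * On a failed iWARP (non-RoCE) modify towards RTS/IDLE/TERMINATE/CLOSING,
+ * irdma_hw_modify_qp() forces the QP to ERROR: asynchronously via a
+ * generated IRDMA_AE_BAD_CLOSE AE when the caller did not wait, or
+ * synchronously with a second modify-to-ERROR request (reset_tcp_conn set)
+ * when it did.
+ */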
+
+/**
+ * irdma_cqp_cq_destroy_cmd - destroy a cq via a cqp command
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_cq_wq_destroy(rf, cq);
+}
+
+/**
+ * irdma_cqp_qp_destroy_cmd - destroy a qp via a cqp command
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_gen_ae_info info = {};
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
+ info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.ae_src = IRDMA_AE_SOURCE_RQ;
+ irdma_gen_ae(rf, qp, &info, false);
+}
+
+/**
+ * irdma_init_hash_desc - initialize hash for crc calculation
+ * @desc: cryption type
+ */
+enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *tdesc;
+
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(tfm))
+ return IRDMA_ERR_MPA_CRC;
+
+ tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!tdesc) {
+ crypto_free_shash(tfm);
+ return IRDMA_ERR_MPA_CRC;
+ }
+
+ tdesc->tfm = tfm;
+ *desc = tdesc;
+
+ return 0;
+}
+
+/**
+ * irdma_free_hash_desc - free hash desc
+ * @desc: shash descriptor to be freed
+ */
+void irdma_free_hash_desc(struct shash_desc *desc)
+{
+ if (desc) {
+ crypto_free_shash(desc->tfm);
+ kfree(desc);
+ }
+}
+
+/**
+ * irdma_ieq_check_mpacrc - check if mpa crc is OK
+ * @desc: desc for hash
+ * @addr: address of buffer for crc
+ * @len: length of buffer
+ * @val: value to be compared
+ */
+enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr, u32 len, u32 val)
+{
+ u32 crc = 0;
+ int ret;
+ enum irdma_status_code ret_code = 0;
+
+ crypto_shash_init(desc);
+ ret = crypto_shash_update(desc, addr, len);
+ if (!ret)
+ crypto_shash_final(desc, (u8 *)&crc);
+ if (crc != val)
+ ret_code = IRDMA_ERR_MPA_CRC;
+
+ return ret_code;
+}
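+
+/*
+ * Illustrative pairing of the three CRC helpers above (a sketch; 'frame',
+ * 'frame_len' and 'rx_crc' are hypothetical):
+ *
+ *	struct shash_desc *desc;
+ *
+ *	if (!irdma_init_hash_desc(&desc)) {
+ *		if (irdma_ieq_check_mpacrc(desc, frame, frame_len, rx_crc))
+ *			irdma_ieq_mpa_crc_ae(dev, qp);
+ *		irdma_free_hash_desc(desc);
+ *	}
+ */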
+
+/**
+ * irdma_ieq_get_qp - get qp based on quad in puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_cm_node *cm_node;
+ struct irdma_device *iwdev = buf->vsi->back_vsi;
+ u32 loc_addr[4] = {};
+ u32 rem_addr[4] = {};
+ u16 loc_port, rem_port;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph = (struct iphdr *)buf->iph;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ if (iph->version == 4) {
+ loc_addr[0] = ntohl(iph->daddr);
+ rem_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+ }
+ loc_port = ntohs(tcph->dest);
+ rem_port = ntohs(tcph->source);
+ cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+ loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
+ if (!cm_node)
+ return NULL;
+
+ iwqp = cm_node->iwqp;
+ irdma_rem_ref_cm_node(cm_node);
+
+ return &iwqp->sc_qp;
+}
+
+/**
+ * irdma_send_ieq_ack - ACK duplicate or out-of-order partial FPDUs
+ * @qp: qp ptr
+ */
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
+{
+ struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
+ struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+
+ irdma_send_ack(cm_node);
+}
+
+/**
+ * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
+ * @qp: qp pointer
+ * @ah_info: AH info pointer
+ */
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info)
+{
+ struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+
+ memset(ah_info, 0, sizeof(*ah_info));
+ ah_info->do_lpbk = true;
+ ah_info->vlan_tag = buf->vlan_id;
+ ah_info->insert_vlan_tag = buf->vlan_valid;
+ ah_info->ipv4_valid = buf->ipv4;
+ ah_info->vsi = qp->vsi;
+
+ if (buf->smac_valid)
+ ether_addr_copy(ah_info->mac_addr, buf->smac);
+
+ if (buf->ipv4) {
+ ah_info->ipv4_valid = true;
+ iph = (struct iphdr *)buf->iph;
+ ah_info->hop_ttl = iph->ttl;
+ ah_info->tc_tos = iph->tos;
+ ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
+ ah_info->src_ip_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ ah_info->hop_ttl = ip6h->hop_limit;
+ ah_info->tc_tos = ip6h->priority;
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ }
+
+ ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
+ ah_info->dest_ip_addr,
+ ah_info->ipv4_valid,
+ NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
+ u16 len, u32 seqnum)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u16 iphlen;
+ u16 pktsize;
+ u8 *addr = buf->mem.va;
+
+ iphlen = (buf->ipv4) ? 20 : 40;
+ iph = (struct iphdr *)(addr + buf->maclen);
+ tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+ pktsize = len + buf->tcphlen + iphlen;
+ iph->tot_len = htons(pktsize);
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum)
+{
+ struct tcphdr *tcph;
+ u8 *addr;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
+
+ addr = buf->mem.va;
+ tcph = (struct tcphdr *)addr;
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: completion info describing the received buffer
+ * @buf: puda buffer
+ */
+static enum irdma_status_code
+irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ u16 iphlen;
+ u16 pkt_len;
+ u8 *mem = buf->mem.va;
+ struct ethhdr *ethh = buf->mem.va;
+
+ if (ethh->h_proto == htons(ETH_P_8021Q)) {
+ info->vlan_valid = true;
+ buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
+ VLAN_VID_MASK;
+ }
+
+ buf->maclen = info->vlan_valid ? VLAN_ETH_HLEN : ETH_HLEN;
+ iphlen = (info->l3proto) ? 40 : 20;
+ buf->ipv4 = !info->l3proto;
+ buf->iph = mem + buf->maclen;
+ iph = (struct iphdr *)buf->iph;
+ buf->tcph = buf->iph + iphlen;
+ tcph = (struct tcphdr *)buf->tcph;
+
+ if (buf->ipv4) {
+ pkt_len = ntohs(iph->tot_len);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ pkt_len = ntohs(ip6h->payload_len) + iphlen;
+ }
+
+ buf->totallen = pkt_len + buf->maclen;
+
+ if (info->payload_len < buf->totallen) {
+ ibdev_dbg(to_ibdev(buf->vsi->dev),
+ "ERR: payload_len = 0x%x totallen expected0x%x\n",
+ info->payload_len, buf->totallen);
+ return IRDMA_ERR_INVALID_SIZE;
+ }
+
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - iphlen - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ return 0;
+}
+
+/**
+ * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: completion info describing the received buffer
+ * @buf: puda buffer
+ */
+enum irdma_status_code
+irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct tcphdr *tcph;
+ u32 pkt_len;
+ u8 *mem;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_puda_get_tcpip_info(info, buf);
+
+ mem = buf->mem.va;
+ buf->vlan_valid = info->vlan_valid;
+ if (info->vlan_valid)
+ buf->vlan_id = info->vlan;
+
+ buf->ipv4 = info->ipv4;
+ if (buf->ipv4)
+ buf->iph = mem + IRDMA_IPV4_PAD;
+ else
+ buf->iph = mem;
+
+ buf->tcph = mem + IRDMA_TCP_OFFSET;
+ tcph = (struct tcphdr *)buf->tcph;
+ pkt_len = info->payload_len;
+ buf->totallen = pkt_len;
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ if (info->smac_valid) {
+ ether_addr_copy(buf->smac, info->smac);
+ buf->smac_valid = true;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @t: timer_list pointer
+ */
+static void irdma_hw_stats_timeout(struct timer_list *t)
+{
+ struct irdma_vsi_pestat *pf_devstat =
+ from_timer(pf_devstat, t, stats_timer);
+ struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
+
+ if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
+ else
+ irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
+
+ mod_timer(&pf_devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_start_timer - Start periodic stats timer
+ * @vsi: vsi structure pointer
+ */
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
+ mod_timer(&devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to vsi structure
+ */
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ del_timer_sync(&devstat->stats_timer);
+}
+
+/**
+ * irdma_process_stats - Check for wrap and update stats
+ * @pestat: stats structure pointer
+ */
+static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
+{
+ sc_vsi_update_stats(pestat->vsi);
+}
+
+/**
+ * irdma_cqp_gather_stats_gen1 - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: statistics structure
+ */
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat)
+{
+ struct irdma_gather_stats *gather_stats =
+ pestat->gather_info.gather_stats_va;
+ u32 stats_inst_offset_32;
+ u32 stats_inst_offset_64;
+
+ stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
+ pestat->gather_info.stats_inst_index :
+ pestat->hw->hmc.hmc_fn_id;
+ stats_inst_offset_32 *= 4;
+ stats_inst_offset_64 = stats_inst_offset_32 * 2;
+
+ gather_stats->rxvlanerr =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
+ + stats_inst_offset_32);
+ gather_stats->ip4rxdiscard =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
+ + stats_inst_offset_32);
+ gather_stats->ip4rxtrunc =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
+ + stats_inst_offset_32);
+ gather_stats->ip4txnoroute =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
+ + stats_inst_offset_32);
+ gather_stats->ip6rxdiscard =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
+ + stats_inst_offset_32);
+ gather_stats->ip6rxtrunc =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
+ + stats_inst_offset_32);
+ gather_stats->ip6txnoroute =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
+ + stats_inst_offset_32);
+ gather_stats->tcprtxseg =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
+ + stats_inst_offset_32);
+ gather_stats->tcprxopterr =
+ rd32(dev->hw,
+ dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
+ + stats_inst_offset_32);
+
+ gather_stats->ip4rxocts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
+ + stats_inst_offset_64);
+ gather_stats->ip4rxpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip4rxfrags =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
+ + stats_inst_offset_64);
+ gather_stats->ip4rxmcpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip4txocts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
+ + stats_inst_offset_64);
+ gather_stats->ip4txpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip4txfrag =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
+ + stats_inst_offset_64);
+ gather_stats->ip4txmcpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6rxocts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6rxpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6rxfrags =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
+ + stats_inst_offset_64);
+ gather_stats->ip6rxmcpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6txocts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6txpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
+ + stats_inst_offset_64);
+ gather_stats->ip6txfrags =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
+ + stats_inst_offset_64);
+ gather_stats->ip6txmcpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
+ + stats_inst_offset_64);
+ gather_stats->tcprxsegs =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
+ + stats_inst_offset_64);
+ gather_stats->tcptxsegs =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
+ + stats_inst_offset_64);
+ gather_stats->rdmarxrds =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
+ + stats_inst_offset_64);
+ gather_stats->rdmarxsnds =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
+ + stats_inst_offset_64);
+ gather_stats->rdmarxwrs =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
+ + stats_inst_offset_64);
+ gather_stats->rdmatxrds =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
+ + stats_inst_offset_64);
+ gather_stats->rdmatxsnds =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
+ + stats_inst_offset_64);
+ gather_stats->rdmatxwrs =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
+ + stats_inst_offset_64);
+ gather_stats->rdmavbn =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
+ + stats_inst_offset_64);
+ gather_stats->rdmavinv =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
+ + stats_inst_offset_64);
+ gather_stats->udprxpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
+ + stats_inst_offset_64);
+ gather_stats->udptxpkts =
+ rd64(dev->hw,
+ dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
+ + stats_inst_offset_64);
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_process_cqp_stats - Check for wrap and update stats
+ * @cqp_request: cqp_request structure pointer
+ */
+static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_vsi_pestat *pestat = cqp_request->param;
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_cqp_gather_stats_cmd - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: pointer to stats info
+ * @wait: flag to wait or not wait for stats
+ */
+enum irdma_status_code
+irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_gather.info = pestat->gather_info;
+ cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
+ cqp_request->param = pestat;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_process_cqp_stats;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ irdma_process_stats(pestat);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
+ * @vsi: pointer to vsi structure
+ * @cmd: command to allocate or free
+ * @stats_info: pointer to the stats instance info
+ */
+enum irdma_status_code
+irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+ bool wait = false;
+
+ if (cmd == IRDMA_OP_STATS_ALLOCATE)
+ wait = true;
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_manage.info = *stats_info;
+ cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ceq_cmd - Create/Destroy CEQs after CEQ 0
+ * @dev: pointer to device info
+ * @sc_ceq: pointer to ceq structure
+ * @op: Create or Destroy
+ */
+enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_ceq *sc_ceq, u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.ceq_create.ceq = sc_ceq;
+ cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_aeq_cmd - Create/Destroy AEQ
+ * @dev: pointer to device info
+ * @sc_aeq: pointer to aeq structure
+ * @op: Create or Destroy
+ */
+enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
+ struct irdma_sc_aeq *sc_aeq, u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.aeq_create.aeq = sc_aeq;
+ cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
+ * @dev: pointer to device structure
+ * @cmd: Add, modify or delete
+ * @node_info: pointer to ws node info
+ */
+enum irdma_status_code
+irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+ bool poll;
+
+ poll = !rf->sc_dev.ceq_valid;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.ws_node.info = *node_info;
+ cqp_info->in.u.ws_node.cqp = cqp;
+ cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status)
+ goto exit;
+
+ if (poll) {
+ struct irdma_ccq_cqe_info compl_info;
+
+ status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
+ &compl_info);
+ node_info->qs_handle = compl_info.op_ret_val;
+ ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
+ compl_info.op_code, compl_info.op_ret_val);
+ } else {
+ node_info->qs_handle = cqp_request->compl_info.op_ret_val;
+ }
+
+exit:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_up_map_cmd - Set the user priority (UP) mapping
+ * @dev: pointer to device structure
+ * @cmd: map command
+ * @map_info: pointer to up map info
+ */
+enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_up_info *map_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
+ if (!cqp_request)
+ return IRDMA_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.up_map.info = *map_info;
+ cqp_info->in.u.up_map.cqp = cqp;
+ cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ah_cqp_op - perform an AH cqp operation
+ * @rf: RDMA PCI function
+ * @sc_ah: address handle
+ * @cmd: AH operation
+ * @wait: wait if true
+ * @callback_fcn: Callback function on CQP op completion
+ * @cb_param: parameter for callback function
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *),
+ void *cb_param)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
+ return -EINVAL;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ if (cmd == IRDMA_OP_AH_CREATE) {
+ cqp_info->in.u.ah_create.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
+ } else if (cmd == IRDMA_OP_AH_DESTROY) {
+ cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
+ }
+
+ if (!wait) {
+ cqp_request->callback_fcn = callback_fcn;
+ cqp_request->param = cb_param;
+ }
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ if (status)
+ return -ENOMEM;
+
+ if (wait)
+ sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
+
+ return 0;
+}
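+
+/*
+ * Unlike most helpers in this file, which return enum irdma_status_code,
+ * irdma_ah_cqp_op() returns a plain errno; irdma_puda_create_ah() below
+ * folds any failure back into IRDMA_ERR_NO_MEMORY.
+ */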
+
+/**
+ * irdma_ieq_ah_cb - callback after creation of AH for IEQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_qp *qp = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->pfpdu.lock, flags);
+ if (!cqp_request->compl_info.op_ret_val) {
+ sc_ah->ah_info.ah_valid = true;
+ irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
+ } else {
+ sc_ah->ah_info.ah_valid = false;
+ irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
+ }
+ spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
+}
+
+/**
+ * irdma_ilq_ah_cb - callback after creation of AH for ILQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = cm_node->ah;
+
+ sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
+ irdma_add_conn_est_qh(cm_node);
+}
+
+/**
+ * irdma_puda_create_ah - create AH for ILQ/IEQ QPs
+ * @dev: device pointer
+ * @ah_info: Address handle info
+ * @wait: When true will wait for operation to complete
+ * @type: ILQ/IEQ
+ * @cb_param: Callback param when not waiting
+ * @ah_ret: Returned pointer to address handle if created
+ */
+enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info,
+ bool wait, enum puda_rsrc_type type,
+ void *cb_param,
+ struct irdma_sc_ah **ah_ret)
+{
+ struct irdma_sc_ah *ah;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int err;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ *ah_ret = ah;
+ if (!ah)
+ return IRDMA_ERR_NO_MEMORY;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
+ &ah_info->ah_idx, &rf->next_ah);
+ if (err)
+ goto err_free;
+
+ ah->dev = dev;
+ ah->ah_info = *ah_info;
+
+ if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ilq_ah_cb, cb_param);
+ else
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ieq_ah_cb, cb_param);
+
+ if (err)
+ goto error;
+ return 0;
+
+error:
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+err_free:
+ kfree(ah);
+ *ah_ret = NULL;
+ return IRDMA_ERR_NO_MEMORY;
+}
+
+/**
+ * irdma_puda_free_ah - free a puda address handle
+ * @dev: device pointer
+ * @ah: The address handle to free
+ */
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ if (!ah)
+ return;
+
+ if (ah->ah_info.ah_valid) {
+ irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+ }
+
+ kfree(ah);
+}
+
+/**
+ * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_ah *sc_ah = cqp_request->param;
+
+ sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
+}
+
+/**
+ * irdma_prm_add_pble_mem - add memory to pble resources
+ * @pprm: pble resource manager
+ * @pchunk: chunk of memory to add
+ */
+enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk)
+{
+ u64 sizeofbitmap;
+
+ if (pchunk->size & 0xfff)
+ return IRDMA_ERR_PARAM;
+
+ sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
+
+ pchunk->bitmapmem.size = sizeofbitmap >> 3;
+ pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
+
+ if (!pchunk->bitmapmem.va)
+ return IRDMA_ERR_NO_MEMORY;
+
+ pchunk->bitmapbuf = pchunk->bitmapmem.va;
+ bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
+
+ pchunk->sizeofbitmap = sizeofbitmap;
+ /* each pble is 8 bytes hence shift by 3 */
+ pprm->total_pble_alloc += pchunk->size >> 3;
+ pprm->free_pble_cnt += pchunk->size >> 3;
+
+ return 0;
+}
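+
+/*
+ * Worked example for the sizing above: with a 2 MiB chunk and
+ * pble_shift == 3, sizeofbitmap = 2 MiB >> 3 = 262144 bits, the bitmap
+ * buffer occupies 262144 >> 3 = 32 KiB, and total_pble_alloc and
+ * free_pble_cnt each grow by 2 MiB >> 3 = 262144 8-byte pbles.
+ */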
+
+/**
+ * irdma_prm_get_pbles - get pbles from prm
+ * @pprm: pble resource manager
+ * @chunkinfo: information about the chunk where pbles were acquired
+ * @mem_size: size of pble memory needed
+ * @vaddr: returns virtual address of pble memory
+ * @fpm_addr: returns fpm address of pble memory
+ */
+enum irdma_status_code
+irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr)
+{
+ u64 bits_needed;
+ u64 bit_idx = PBLE_INVALID_IDX;
+ struct irdma_chunk *pchunk = NULL;
+ struct list_head *chunk_entry = pprm->clist.next;
+ u32 offset;
+ unsigned long flags;
+
+ *vaddr = NULL;
+ *fpm_addr = 0;
+
+ bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ while (chunk_entry != &pprm->clist) {
+ pchunk = list_entry(chunk_entry, struct irdma_chunk, list);
+ bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
+ pchunk->sizeofbitmap, 0,
+ bits_needed, 0);
+ if (bit_idx < pchunk->sizeofbitmap)
+ break;
+
+ /* advance to the next chunk in the list */
+ chunk_entry = pchunk->list.next;
+ }
+
+ if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+ return IRDMA_ERR_NO_MEMORY;
+ }
+
+ bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
+ offset = bit_idx << pprm->pble_shift;
+ *vaddr = pchunk->vaddr + offset;
+ *fpm_addr = pchunk->fpm_addr + offset;
+
+ chunkinfo->pchunk = pchunk;
+ chunkinfo->bit_idx = bit_idx;
+ chunkinfo->bits_used = bits_needed;
+ /* each pble is 8 bytes, hence the shift by 3 */
+ pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+
+ return 0;
+}
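+
+/*
+ * Illustrative get/return pairing (a sketch; 'pbl_size' is hypothetical),
+ * using irdma_prm_return_pbles() defined below:
+ *
+ *	struct irdma_pble_chunkinfo ci;
+ *	u64 *va;
+ *	u64 fpm;
+ *
+ *	if (!irdma_prm_get_pbles(pprm, &ci, pbl_size, &va, &fpm)) {
+ *		... program va / fpm into the HW object ...
+ *		irdma_prm_return_pbles(pprm, &ci);
+ *	}
+ */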
+
+/**
+ * irdma_prm_return_pbles - return pbles back to prm
+ * @pprm: pble resource manager
+ * @chunkinfo: chunk where pbles were acquired and are to be freed
+ */
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
+ bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
+ chunkinfo->bits_used);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+}
+
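+/**
+ * irdma_map_vm_page_list - DMA-map the pages backing a vmalloc buffer
+ * @hw: hardware structure containing the mapping device
+ * @va: page-aligned vmalloc'ed virtual address
+ * @pg_dma: array receiving one DMA address per page
+ * @pg_cnt: number of pages to map
+ */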
+enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
+ dma_addr_t *pg_dma, u32 pg_cnt)
+{
+ struct page *vm_page;
+ int i;
+ u8 *addr;
+
+ addr = (u8 *)(uintptr_t)va;
+ for (i = 0; i < pg_cnt; i++) {
+ vm_page = vmalloc_to_page(addr);
+ if (!vm_page)
+ goto err;
+
+ pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hw->device, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+
+err:
+ irdma_unmap_vm_page_list(hw, pg_dma, i);
+ return IRDMA_ERR_NO_MEMORY;
+}
+
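+/**
+ * irdma_unmap_vm_page_list - undo irdma_map_vm_page_list
+ * @hw: hardware structure containing the mapping device
+ * @pg_dma: array of DMA addresses to unmap
+ * @pg_cnt: number of entries in @pg_dma
+ */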
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
+{
+ int i;
+
+ for (i = 0; i < pg_cnt; i++)
+ dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * irdma_pble_free_paged_mem - free virtual paged memory
+ * @chunk: chunk to free with paged memory
+ */
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
+{
+ if (!chunk->pg_cnt)
+ goto done;
+
+ irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
+ chunk->pg_cnt);
+
+done:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+ vfree(chunk->vaddr);
+ chunk->vaddr = NULL;
+ chunk->type = 0;
+}
+
+/**
+ * irdma_pble_get_paged_mem - allocate paged memory for pbles
+ * @chunk: chunk to add for paged memory
+ * @pg_cnt: number of pages needed
+ */
+enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
+ u32 pg_cnt)
+{
+ u32 size;
+ void *va;
+
+ chunk->dmainfo.dmaaddrs = kcalloc(pg_cnt, sizeof(*chunk->dmainfo.dmaaddrs),
+ GFP_KERNEL);
+ if (!chunk->dmainfo.dmaaddrs)
+ return IRDMA_ERR_NO_MEMORY;
+
+ size = PAGE_SIZE * pg_cnt;
+ va = vmalloc(size);
+ if (!va)
+ goto err;
+
+ if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
+ pg_cnt)) {
+ vfree(va);
+ goto err;
+ }
+ chunk->vaddr = va;
+ chunk->size = size;
+ chunk->pg_cnt = pg_cnt;
+ chunk->type = PBLE_SD_PAGED;
+
+ return 0;
+err:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+
+ return IRDMA_ERR_NO_MEMORY;
+}
+
+/**
+ * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
+ * @dev: device pointer
+ */
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ u32 next = 1;
+ u32 node_id;
+
+ if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
+ &node_id, &next))
+ return IRDMA_WS_NODE_INVALID;
+
+ return (u16)node_id;
+}
+
+/**
+ * irdma_free_ws_node_id - Free a tx scheduler node ID
+ * @dev: device pointer
+ * @node_id: Work scheduler node ID
+ */
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
+}
+
+/**
+ * irdma_modify_qp_to_err - Modify a QP to error
+ * @sc_qp: qp structure
+ */
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
+{
+ struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
+ struct ib_qp_attr attr;
+
+ if (qp->iwdev->reset)
+ return;
+
+ attr.qp_state = IB_QPS_ERR;
+
+ if (rdma_protocol_roce(qp->ibqp.device, 1))
+ irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+ else
+ irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+}
+
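+/**
+ * irdma_ib_qp_event - dispatch an affiliated event to the ib_qp event handler
+ * @iwqp: qp on which the event occurred
+ * @event: irdma event type to translate into an ib_event
+ */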
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
+{
+ struct ib_event ibevent;
+
+ if (!iwqp->ibqp.event_handler)
+ return;
+
+ switch (event) {
+ case IRDMA_QP_EVENT_CATASTROPHIC:
+ ibevent.event = IB_EVENT_QP_FATAL;
+ break;
+ case IRDMA_QP_EVENT_ACCESS_ERR:
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ }
+ ibevent.device = iwqp->ibqp.device;
+ ibevent.element.qp = &iwqp->ibqp;
+ iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
new file mode 100644
index 000000000000..9712f6902ba8
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -0,0 +1,4544 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int irdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+ struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
+ irdma_fw_minor_ver(&rf->sc_dev);
+ props->device_cap_flags = iwdev->device_cap_flags;
+ props->vendor_id = pcidev->vendor;
+ props->vendor_part_id = pcidev->device;
+
+ props->hw_ver = rf->pcidev->revision;
+ props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ props->max_mr_size = hw_attrs->max_mr_size;
+ props->max_qp = rf->max_qp - rf->used_qps;
+ props->max_qp_wr = hw_attrs->max_qp_wr;
+ props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_cq = rf->max_cq - rf->used_cqs;
+ props->max_cqe = rf->max_cqe;
+ props->max_mr = rf->max_mr - rf->used_mrs;
+ props->max_mw = props->max_mr;
+ props->max_pd = rf->max_pd - rf->used_pds;
+ props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
+ props->max_qp_rd_atom = hw_attrs->max_hw_ird;
+ props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
+ if (rdma_protocol_roce(ibdev, 1))
+ props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ props->max_ah = rf->max_ah;
+ props->max_mcast_grp = rf->max_mcg;
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
+ props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
+ props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
+#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
+ props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+
+ return 0;
+}
+
+/**
+ * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
+ * @link_speed: netdev phy link speed
+ * @active_speed: IB port speed
+ * @active_width: IB port width
+ */
+static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
+ u8 *active_width)
+{
+ if (link_speed <= SPEED_1000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ } else if (link_speed <= SPEED_10000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR10;
+ } else if (link_speed <= SPEED_20000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_DDR;
+ } else if (link_speed <= SPEED_25000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ } else if (link_speed <= SPEED_40000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR10;
+ } else {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ }
+}
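+
+/*
+ * For example, per the table above a 25G link reports IB_SPEED_EDR with
+ * IB_WIDTH_1X, while anything faster than 40G falls through to EDR x4.
+ */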
+
+/**
+ * irdma_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int irdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct net_device *netdev = iwdev->netdev;
+
+ /* no need to zero out props here, done by caller */
+
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+ props->lid = 1;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ if (netif_carrier_ok(netdev) && netif_running(netdev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+ irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
+ &props->active_width);
+
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->gid_tbl_len = 32;
+ props->ip_gids = true;
+ props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
+ } else {
+ props->gid_tbl_len = 1;
+ }
+ props->qkey_viol_cntr = 0;
+ props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
+ props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
+
+ return 0;
+}
+
+/**
+ * irdma_disassociate_ucontext - Disassociate user context
+ * @context: ib user context
+ */
+static void irdma_disassociate_ucontext(struct ib_ucontext *context)
+{
+}
+
+static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ u64 pfn;
+
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_private_data = ucontext;
+ pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot), NULL);
+}
+
+static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
+
+ kfree(entry);
+}
+
+static struct rdma_user_mmap_entry *
+irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
+ enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
+{
+ struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->bar_offset = bar_offset;
+ entry->mmap_flag = mmap_flag;
+
+ ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
+ &entry->rdma_entry, PAGE_SIZE);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
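+
+/*
+ * The offset returned through @mmap_offset above is handed to user space
+ * (e.g. as db_mmap_key) and comes back as vma->vm_pgoff, which
+ * rdma_user_mmap_entry_get() in irdma_mmap() below resolves to this entry.
+ */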
+
+/**
+ * irdma_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct irdma_user_mmap_entry *entry;
+ struct irdma_ucontext *ucontext;
+ u64 pfn;
+ int ret;
+
+ ucontext = to_ucontext(context);
+
+ /* Legacy support for libi40iw with hard-coded mmap key */
+ if (ucontext->legacy_mode)
+ return irdma_mmap_legacy(ucontext, vma);
+
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: pgoff[0x%lx] does not have valid entry\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ entry = to_irdma_mmap_entry(rdma_entry);
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
+ entry->bar_offset, entry->mmap_flag);
+
+ pfn = (entry->bar_offset +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ switch (entry->mmap_flag) {
+ case IRDMA_MMAP_IO_NC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case IRDMA_MMAP_IO_WC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
+ entry->bar_offset, entry->mmap_flag, ret);
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+/**
+ * irdma_alloc_push_page - allocate a push page for qp
+ * @iwqp: qp pointer
+ */
+static void irdma_alloc_push_page(struct irdma_qp *iwqp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = 0;
+ cqp_info->in.u.manage_push_page.info.qs_handle =
+ qp->vsi->qos[qp->user_pri].qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 0;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ if (!status && cqp_request->compl_info.op_ret_val <
+ iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
+ qp->push_idx = cqp_request->compl_info.op_ret_val;
+ qp->push_offset = 0;
+ }
+
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+}
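+
+/*
+ * A push page is, roughly, a device BAR page through which small WQEs can
+ * be written directly to the adapter instead of being fetched over DMA;
+ * op_ret_val carries the index the firmware assigned, and an index at or
+ * above max_hw_device_pages means no push page was available.
+ */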
+
+/**
+ * irdma_alloc_ucontext - Allocate the user context data structure
+ * @uctx: uverbs context pointer
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_alloc_ucontext_req req;
+ struct irdma_alloc_ucontext_resp uresp = {};
+ struct irdma_ucontext *ucontext = to_ucontext(uctx);
+ struct irdma_uk_attrs *uk_attrs;
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
+ goto ver_error;
+
+ ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
+
+ uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ /* GEN_1 legacy support with libi40iw */
+ if (udata->outlen < sizeof(uresp)) {
+ if (uk_attrs->hw_rev != IRDMA_GEN_1)
+ return -EOPNOTSUPP;
+
+ ucontext->legacy_mode = true;
+ uresp.max_qps = iwdev->rf->max_qp;
+ uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
+ uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
+ uresp.kernel_ver = req.userspace_ver;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen)))
+ return -EFAULT;
+ } else {
+ u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ ucontext->db_mmap_entry =
+ irdma_user_mmap_entry_insert(ucontext, bar_off,
+ IRDMA_MMAP_IO_NC,
+ &uresp.db_mmap_key);
+ if (!ucontext->db_mmap_entry)
+ return -ENOMEM;
+
+ uresp.kernel_ver = IRDMA_ABI_VER;
+ uresp.feature_flags = uk_attrs->feature_flags;
+ uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
+ uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
+ uresp.max_hw_inline = uk_attrs->max_hw_inline;
+ uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
+ uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
+ uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
+ uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
+ uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
+ uresp.hw_rev = uk_attrs->hw_rev;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+ return -EFAULT;
+ }
+ }
+
+ INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+ spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+ spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+
+ return 0;
+
+ver_error:
+ ibdev_err(&iwdev->ibdev,
+ "Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, IRDMA_ABI_VER);
+ return -EINVAL;
+}
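+
+/*
+ * The ABI negotiation above keys off udata->outlen: a legacy libi40iw
+ * library passes a response buffer smaller than struct
+ * irdma_alloc_ucontext_resp, which is honoured only on GEN_1 hardware
+ * and flips the context into legacy_mode. Newer libraries receive the
+ * full uk attribute set plus the doorbell page mmap key.
+ */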
+
+/**
+ * irdma_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static void irdma_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct irdma_ucontext *ucontext = to_ucontext(context);
+
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+}
+
+/**
+ * irdma_alloc_pd - allocate protection domain
+ * @pd: PD pointer
+ * @udata: user data
+ */
+static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_alloc_pd_resp uresp = {};
+ struct irdma_sc_pd *sc_pd;
+ u32 pd_id = 0;
+ int err;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
+ &rf->next_pd);
+ if (err)
+ return err;
+
+ sc_pd = &iwpd->sc_pd;
+ if (udata) {
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
+ uresp.pd_id = pd_id;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ err = -EFAULT;
+ goto error;
+ }
+ } else {
+ irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
+ }
+
+ return 0;
+error:
+ irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
+
+ return err;
+}
+
+/**
+ * irdma_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ * @udata: user data
+ */
+static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
+
+ return 0;
+}
+
+/**
+ * irdma_get_pbl - Retrieve pbl from a list given a virtual
+ * address
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct irdma_pbl *irdma_get_pbl(unsigned long va,
+ struct list_head *pbl_list)
+{
+ struct irdma_pbl *iwpbl;
+
+	list_for_each_entry(iwpbl, pbl_list, list) {
+ if (iwpbl->user_base == va) {
+ list_del(&iwpbl->list);
+ iwpbl->on_list = false;
+ return iwpbl;
+ }
+ }
+
+ return NULL;
+}
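+
+/*
+ * irdma_get_pbl() takes no lock itself; every caller in this file holds
+ * the matching qp_reg_mem_list_lock or cq_reg_mem_list_lock across the
+ * lookup. The matching entry is unlinked from the list so the same
+ * user_base cannot be claimed twice.
+ */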
+
+/**
+ * irdma_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+}
+
+static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
+{
+ if (iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
+ iwqp->push_db_mmap_entry = NULL;
+ }
+ if (iwqp->push_wqe_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ iwqp->push_wqe_mmap_entry = NULL;
+ }
+}
+
+static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
+ struct irdma_qp *iwqp,
+ u64 *push_wqe_mmap_key,
+ u64 *push_db_mmap_key)
+{
+ struct irdma_device *iwdev = ucontext->iwdev;
+ u64 rsvd, bar_off;
+
+ rsvd = IRDMA_PF_BAR_RSVD;
+ bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+ /* skip over db page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ /* push wqe page */
+ bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
+ iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_WC,
+ push_wqe_mmap_key);
+ if (!iwqp->push_wqe_mmap_entry)
+ return -ENOMEM;
+
+ /* push doorbell page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_NC,
+ push_db_mmap_key);
+ if (!iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
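+
+/*
+ * Both entries above map device BAR space. Starting from the doorbell
+ * page at hw_regs[IRDMA_DB_ADDR_OFFSET], the code skips one HW page and
+ * the IRDMA_PF_BAR_RSVD region, picks this QP's push WQE page by
+ * push_idx (to be mapped write-combined), and uses the HW page right
+ * after it as the push doorbell (mapped non-cached). If the second
+ * insert fails, the first entry is removed so nothing leaks.
+ */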
+
+/**
+ * irdma_destroy_qp - destroy qp
+ * @ibqp: ib qp pointer, also used to locate the device's qp
+ * @udata: user data
+ */
+static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->sc_qp.qp_uk.destroy_pending = true;
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ irdma_modify_qp_to_err(&iwqp->sc_qp);
+
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ irdma_free_lsmm_rsrc(iwqp);
+ if (!iwdev->reset)
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
+ if (!iwqp->user_mode) {
+ if (iwqp->iwscq) {
+ irdma_clean_cqes(iwqp, iwqp->iwscq);
+ if (iwqp->iwrcq != iwqp->iwscq)
+ irdma_clean_cqes(iwqp, iwqp->iwrcq);
+ }
+ }
+ irdma_remove_push_mmap_entries(iwqp);
+ irdma_free_qp_rsrc(iwqp);
+
+ return 0;
+}
+
+/**
+ * irdma_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: irdma device
+ * @iwqp: qp ptr
+ * @init_info: initialization info to fill in and return
+ */
+static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *init_info)
+{
+ struct irdma_pbl *iwpbl = iwqp->iwpbl;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+ init_info->shadow_area_pa = qpmr->shadow;
+ if (iwpbl->pbl_allocated) {
+ init_info->virtual_map = true;
+ init_info->sq_pa = qpmr->sq_pbl.idx;
+ init_info->rq_pa = qpmr->rq_pbl.idx;
+ } else {
+ init_info->sq_pa = qpmr->sq_pbl.addr;
+ init_info->rq_pa = qpmr->rq_pbl.addr;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialization info to fill in and return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
+ u32 sqdepth, rqdepth;
+ u8 sqshift, rqshift;
+ u32 size;
+ enum irdma_status_code status;
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+
+ irdma_get_wqe_shift(uk_attrs,
+ uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, &sqshift);
+ status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
+ &sqdepth);
+ if (status)
+ return -ENOMEM;
+
+ if (uk_attrs->hw_rev == IRDMA_GEN_1)
+ rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ else
+ irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ &rqshift);
+
+ status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
+ &rqdepth);
+ if (status)
+ return -ENOMEM;
+
+ iwqp->kqp.sq_wrid_mem =
+ kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ if (!iwqp->kqp.sq_wrid_mem)
+ return -ENOMEM;
+
+ iwqp->kqp.rq_wrid_mem =
+ kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+ if (!iwqp->kqp.rq_wrid_mem) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
+ ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
+
+ size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
+ size += (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ kfree(iwqp->kqp.rq_wrid_mem);
+ iwqp->kqp.rq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq = mem->va;
+ info->sq_pa = mem->pa;
+ ukinfo->rq = &ukinfo->sq[sqdepth];
+ info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
+ info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = sqdepth >> sqshift;
+ ukinfo->rq_size = rqdepth >> rqshift;
+ ukinfo->qp_id = iwqp->ibqp.qp_num;
+
+ init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
+ init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+
+ return 0;
+}
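+
+/*
+ * Sizing notes: sqdepth/rqdepth are counted in WQE quanta of
+ * IRDMA_QP_WQE_MIN_SIZE bytes, and a shift of N means one WQE occupies
+ * 2^N quanta, hence the depth >> shift conversions. The single coherent
+ * buffer is laid out as [SQ quanta | RQ quanta | shadow area], which is
+ * why rq_pa and shadow_area_pa are plain offsets from sq_pa. E.g.
+ * sqdepth = 128 with sqshift = 2 yields a 32-WQE send queue.
+ */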
+
+static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
+{
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->mac_valid = true;
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
+
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status ? -ENOMEM : 0;
+}
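+
+/*
+ * This alloc_and_get/handle/put sequence is the standard CQP pattern
+ * used throughout this file: irdma_alloc_and_get_cqp_request() hands
+ * back a referenced request (the second argument asks for a waited
+ * completion, which irdma_handle_cqp_op() blocks on), and
+ * irdma_put_cqp_request() drops the reference whether or not the
+ * operation succeeded.
+ */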
+
+static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+
+ udp_info = &iwqp->udp_info;
+ udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
+ udp_info->cwnd = iwdev->roce_cwnd;
+ udp_info->rexmit_thresh = 2;
+ udp_info->rnr_nak_thresh = 2;
+ udp_info->src_port = 0xc000;
+ udp_info->dst_port = ROCE_V2_UDP_DPORT;
+ roce_info = &iwqp->roce_info;
+ ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+
+ roce_info->rd_en = true;
+ roce_info->wr_rdresp_en = true;
+ roce_info->bind_en = true;
+ roce_info->dcqcn_en = false;
+ roce_info->rtomin = 5;
+
+ roce_info->ack_credits = iwdev->roce_ackcreds;
+ roce_info->ird_size = dev->hw_attrs.max_hw_ird;
+ roce_info->ord_size = dev->hw_attrs.max_hw_ord;
+
+ if (!iwqp->user_mode) {
+ roce_info->priv_mode_en = true;
+ roce_info->fast_reg_en = true;
+ roce_info->udprivcq_en = true;
+ }
+ roce_info->roce_tver = 0;
+
+ ctx_info->roce_info = &iwqp->roce_info;
+ ctx_info->udp_info = &iwqp->udp_info;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+}
+
+static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_iwarp_offload_info *iwarp_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
+ iwarp_info->rd_en = true;
+ iwarp_info->wr_rdresp_en = true;
+ iwarp_info->bind_en = true;
+ iwarp_info->ecn_en = true;
+ iwarp_info->rtomin = 5;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ iwarp_info->ib_rd_en = true;
+ if (!iwqp->user_mode) {
+ iwarp_info->priv_mode_en = true;
+ iwarp_info->fast_reg_en = true;
+ }
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->rdmap_ver = 1;
+
+ ctx_info->iwarp_info = &iwqp->iwarp_info;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ ctx_info->iwarp_info_valid = false;
+}
+
+static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+ struct irdma_device *iwdev)
+{
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+
+ if (init_attr->create_flags)
+ return -EOPNOTSUPP;
+
+ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI)
+ return -EOPNOTSUPP;
+ } else {
+ if (init_attr->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_qp - create qp
+ * @ibpd: ptr of pd
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_qp *iwqp;
+ struct irdma_create_qp_req req;
+ struct irdma_create_qp_resp uresp = {};
+ u32 qp_num = 0;
+ enum irdma_status_code ret;
+ int err_code;
+ int sq_size;
+ int rq_size;
+ struct irdma_sc_qp *qp;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+ struct irdma_qp_init_info init_info = {};
+ struct irdma_qp_host_ctx_info *ctx_info;
+ unsigned long flags;
+
+ err_code = irdma_validate_qp_attrs(init_attr, iwdev);
+ if (err_code)
+ return ERR_PTR(err_code);
+
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
+
+ init_info.vsi = &iwdev->vsi;
+ init_info.qp_uk_init_info.uk_attrs = uk_attrs;
+ init_info.qp_uk_init_info.sq_size = sq_size;
+ init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+ init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
+
+ iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+ if (!iwqp)
+ return ERR_PTR(-ENOMEM);
+
+ qp = &iwqp->sc_qp;
+ qp->qp_uk.back_qp = iwqp;
+ qp->qp_uk.lock = &iwqp->lock;
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+
+ iwqp->iwdev = iwdev;
+ iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
+ 256);
+ iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->q2_ctx_mem.size,
+ &iwqp->q2_ctx_mem.pa,
+ GFP_KERNEL);
+ if (!iwqp->q2_ctx_mem.va) {
+ err_code = -ENOMEM;
+ goto error;
+ }
+
+ init_info.q2 = iwqp->q2_ctx_mem.va;
+ init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+ init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
+ init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
+
+ if (init_attr->qp_type == IB_QPT_GSI)
+ qp_num = 1;
+ else
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ &qp_num, &rf->next_qp);
+ if (err_code)
+ goto error;
+
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+ qp = &iwqp->sc_qp;
+ iwqp->iwscq = to_iwcq(init_attr->send_cq);
+ iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+ iwqp->host_ctx.va = init_info.host_ctx;
+ iwqp->host_ctx.pa = init_info.host_ctx_pa;
+ iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+ init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1))
+ init_info.qp_uk_init_info.first_sq_wq = 1;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+ init_waitqueue_head(&iwqp->waitq);
+ init_waitqueue_head(&iwqp->mod_qp_waitq);
+
+ if (udata) {
+ err_code = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: ib_copy_from_data fail\n");
+ goto error;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext,
+ ibucontext);
+
+ init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ err_code = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ goto error;
+ }
+ }
+ init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
+ err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ } else {
+ init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
+ err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
+ }
+
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
+ goto error;
+ }
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type == IB_QPT_RC) {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_WRITE_WITH_IMM |
+ IRDMA_ROCE;
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_ROCE;
+ }
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
+ init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
+
+ ret = irdma_sc_qp_init(qp, &init_info);
+ if (ret) {
+ err_code = -EPROTO;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
+ goto error;
+ }
+
+ ctx_info = &iwqp->ctx_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
+ else
+ irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+
+ err_code = irdma_cqp_create_qp_cmd(iwqp);
+ if (err_code)
+ goto error;
+
+ refcount_set(&iwqp->refcnt, 1);
+ spin_lock_init(&iwqp->lock);
+ spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
+ iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ rf->qp_table[qp_num] = iwqp;
+ iwqp->max_send_wr = sq_size;
+ iwqp->max_recv_wr = rq_size;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ }
+
+ if (udata) {
+		/* GEN_1 legacy support: libi40iw does not have the expanded uresp struct */
+ if (udata->outlen < sizeof(uresp)) {
+ uresp.lsmm = 1;
+ uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
+ } else {
+ if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
+ uresp.lsmm = 1;
+ }
+ uresp.actual_sq_size = sq_size;
+ uresp.actual_rq_size = rq_size;
+ uresp.qp_id = qp_num;
+ uresp.qp_caps = qp->qp_uk.qp_caps;
+
+ err_code = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
+ irdma_destroy_qp(&iwqp->ibqp, udata);
+ return ERR_PTR(err_code);
+ }
+ }
+
+ init_completion(&iwqp->free_qp);
+ return &iwqp->ibqp;
+
+error:
+ irdma_free_qp_rsrc(iwqp);
+
+ return ERR_PTR(err_code);
+}
+
+static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
+{
+ int acc_flags = 0;
+
+ if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
+ if (iwqp->roce_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->roce_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->roce_info.bind_en)
+ acc_flags |= IB_ACCESS_MW_BIND;
+ } else {
+ if (iwqp->iwarp_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->iwarp_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->iwarp_info.bind_en)
+ acc_flags |= IB_ACCESS_MW_BIND;
+ }
+ return acc_flags;
+}
+
+/**
+ * irdma_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ attr->qp_state = iwqp->ibqp_state;
+ attr->cur_qp_state = iwqp->ibqp_state;
+ attr->cap.max_send_wr = iwqp->max_send_wr;
+ attr->cap.max_recv_wr = iwqp->max_recv_wr;
+ attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
+ attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
+ attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
+ attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
+ attr->port_num = 1;
+ if (rdma_protocol_roce(ibqp->device, 1)) {
+ attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
+ attr->qkey = iwqp->roce_info.qkey;
+ attr->rq_psn = iwqp->udp_info.epsn;
+ attr->sq_psn = iwqp->udp_info.psn_nxt;
+ attr->dest_qp_num = iwqp->roce_info.dest_qp;
+ attr->pkey_index = iwqp->roce_info.p_key;
+ attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
+ attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->max_rd_atomic = iwqp->roce_info.ord_size;
+ attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
+ }
+
+ init_attr->event_handler = iwqp->ibqp.event_handler;
+ init_attr->qp_context = iwqp->ibqp.qp_context;
+ init_attr->send_cq = iwqp->ibqp.send_cq;
+ init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->cap = attr->cap;
+
+ return 0;
+}
+
+/**
+ * irdma_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (index >= IRDMA_PKEY_TBL_SZ)
+ return -EINVAL;
+
+ *pkey = IRDMA_DEFAULT_PKEY;
+ return 0;
+}
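+
+/*
+ * The device keeps no pkey table: any index below IRDMA_PKEY_TBL_SZ
+ * reports the single IRDMA_DEFAULT_PKEY, which is also what the
+ * IB_QP_PKEY_INDEX handling in irdma_modify_qp_roce() below consumes.
+ */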
+
+/**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: pointer to the qp being modified
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ unsigned long flags;
+ u8 issue_modify_qp = 0;
+ int ret = 0;
+
+ ctx_info = &iwqp->ctx_info;
+ roce_info = &iwqp->roce_info;
+ udp_info = &iwqp->udp_info;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ roce_info->dest_qp = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
+ &roce_info->p_key);
+ if (ret)
+ return ret;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ roce_info->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ udp_info->psn_nxt = attr->sq_psn;
+ udp_info->lsn = 0xffff;
+ udp_info->psn_una = attr->sq_psn;
+ udp_info->psn_max = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ udp_info->epsn = attr->rq_psn;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ udp_info->rnr_nak_thresh = attr->rnr_retry;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ udp_info->rexmit_thresh = attr->retry_cnt;
+
+ ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
+
+ if (attr_mask & IB_QP_AV) {
+ struct irdma_av *av = &iwqp->roce_ah.av;
+ const struct ib_gid_attr *sgid_attr;
+ u16 vlan_id = VLAN_N_VID;
+ u32 local_ip[4];
+
+ memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
+ if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+ udp_info->ttl = attr->ah_attr.grh.hop_limit;
+ udp_info->flow_label = attr->ah_attr.grh.flow_label;
+ udp_info->tos = attr->ah_attr.grh.traffic_class;
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
+ ctx_info->user_pri = rt_tos2priority(udp_info->tos);
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ }
+ sgid_attr = attr->ah_attr.grh.sgid_attr;
+ ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
+ ctx_info->roce_info->mac_addr);
+ if (ret)
+ return ret;
+
+ if (vlan_id >= VLAN_N_VID && iwdev->dcb)
+ vlan_id = 0;
+ if (vlan_id < VLAN_N_VID) {
+ udp_info->insert_vlan_tag = true;
+ udp_info->vlan_tag = vlan_id |
+ ctx_info->user_pri << VLAN_PRIO_SHIFT;
+ } else {
+ udp_info->insert_vlan_tag = false;
+ }
+
+ av->attrs = attr->ah_attr;
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
+ roce_info->local_qp = ibqp->qp_num;
+ if (av->sgid_addr.saddr.sa_family == AF_INET6) {
+ __be32 *daddr =
+ av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+ __be32 *saddr =
+ av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+
+ irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
+ irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
+
+ udp_info->ipv4 = false;
+ irdma_copy_ip_ntohl(local_ip, daddr);
+
+ udp_info->arp_idx = irdma_arp_table(iwdev->rf,
+ &local_ip[0],
+ false, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
+ __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
+
+ local_ip[0] = ntohl(daddr);
+
+ udp_info->ipv4 = true;
+ udp_info->dest_ip_addr[0] = 0;
+ udp_info->dest_ip_addr[1] = 0;
+ udp_info->dest_ip_addr[2] = 0;
+ udp_info->dest_ip_addr[3] = local_ip[0];
+
+ udp_info->local_ipaddr[0] = 0;
+ udp_info->local_ipaddr[1] = 0;
+ udp_info->local_ipaddr[2] = 0;
+ udp_info->local_ipaddr[3] = ntohl(saddr);
+ }
+ udp_info->arp_idx =
+ irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
+ attr->ah_attr.roce.dmac);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ord=%d\n",
+ attr->max_rd_atomic,
+ dev->hw_attrs.max_hw_ord);
+ return -EINVAL;
+ }
+ if (attr->max_rd_atomic)
+ roce_info->ord_size = attr->max_rd_atomic;
+ info.ord_valid = true;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ird=%d\n",
+ attr->max_rd_atomic,
+ dev->hw_attrs.max_hw_ird);
+ return -EINVAL;
+ }
+ if (attr->max_dest_rd_atomic)
+ roce_info->ird_size = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ roce_info->rd_en = true;
+ }
+
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
+ iwqp->ibqp.qp_type, attr_mask)) {
+ ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->ibqp_state,
+ attr->qp_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.curr_iwarp_state = iwqp->iwarp_state;
+
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ break;
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTR;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->ibqp_state < IB_QPS_RTR ||
+ iwqp->ibqp_state == IB_QPS_ERR) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.ord_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ issue_modify_qp = 1;
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
+ goto exit;
+
+ if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ irdma_hw_modify_qp(iwdev, iwqp, &info, true);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ issue_modify_qp = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (iwqp->ibqp_state > IB_QPS_RTS &&
+ !iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
+ IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else {
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+ ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (ret) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_modify_qp - modify qp request
+ * @ibqp: pointer to the qp being modified
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_iwarp_offload_info *offload_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ u8 issue_modify_qp = 0;
+ u8 dont_wait = 0;
+ int err;
+ unsigned long flags;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ ctx_info = &iwqp->ctx_info;
+ offload_info = &iwqp->iwarp_info;
+ tcp_info = &iwqp->tcp_info;
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
+ iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ info.curr_iwarp_state = iwqp->iwarp_state;
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
+ !iwqp->cm_id) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ issue_modify_qp = 1;
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->hte_added = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ info.tcp_ctx_valid = true;
+ info.ord_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
+ iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ if (iwqp->sc_qp.term_flags) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_terminate_del_timer(&iwqp->sc_qp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
+ iwdev->iw_status &&
+ iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
+ info.reset_tcp_conn = true;
+ else
+ dont_wait = 1;
+
+ issue_modify_qp = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ ctx_info->iwarp_info_valid = true;
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ offload_info->rd_en = true;
+ }
+
+ if (ctx_info->iwarp_info_valid) {
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+
+ if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
+ if (dont_wait) {
+ if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_cm_disconn(iwqp);
+ }
+ } else {
+ int close_timer_started;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+
+ if (iwqp->cm_node) {
+ refcount_inc(&iwqp->cm_node->refcnt);
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
+ if (iwqp->cm_id && close_timer_started == 1)
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE, 1, 0);
+
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+ } else {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ }
+ }
+ }
+ if (attr_mask & IB_QP_STATE && udata &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (err) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return err;
+}
+
+/**
+ * irdma_cq_free_rsrc - free up resources for cq
+ * @rf: RDMA PCI function
+ * @iwcq: cq ptr
+ */
+static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
+{
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
+ iwcq->kmem.va, iwcq->kmem.pa);
+ iwcq->kmem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device,
+ iwcq->kmem_shadow.size,
+ iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
+ iwcq->kmem_shadow.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * irdma_free_cqbuf - worker to free a cq buffer
+ * @work: provides access to the cq buffer to free
+ */
+static void irdma_free_cqbuf(struct work_struct *work)
+{
+ struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
+
+ dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
+ cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
+ cq_buf->kmem_buf.va = NULL;
+ kfree(cq_buf);
+}
+
+/**
+ * irdma_process_resize_list - remove resized cq buffers from the resize_list
+ * @iwcq: cq which owns the resize_list
+ * @iwdev: irdma device
+ * @lcqe_buf: the buffer where the last cqe is received
+ */
+static int irdma_process_resize_list(struct irdma_cq *iwcq,
+ struct irdma_device *iwdev,
+ struct irdma_cq_buf *lcqe_buf)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *cq_buf;
+ int cnt = 0;
+
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
+ if (cq_buf == lcqe_buf)
+ return cnt;
+
+ list_del(&cq_buf->list);
+ queue_work(iwdev->cleanup_wq, &cq_buf->work);
+ cnt++;
+ }
+
+ return cnt;
+}
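+
+/*
+ * Retired CQ buffers cannot be freed while completions may still be
+ * polled out of them, so each is queued on iwdev->cleanup_wq and freed
+ * later by irdma_free_cqbuf(). Passing lcqe_buf stops the sweep at the
+ * buffer that produced the last CQE, keeping it alive; a NULL lcqe_buf
+ * (as used from irdma_destroy_cq()) drains the entire list.
+ */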
+
+/**
+ * irdma_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_cq->device);
+ struct irdma_cq *iwcq = to_iwcq(ib_cq);
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+ struct irdma_sc_dev *dev = cq->dev;
+ struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
+ struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (!list_empty(&iwcq->resize_list))
+ irdma_process_resize_list(iwcq, iwdev, NULL);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ irdma_cq_wq_destroy(iwdev->rf, cq);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
+
+ spin_lock_irqsave(&iwceq->ce_lock, flags);
+ irdma_sc_cleanup_ceqes(cq, ceq);
+ spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_resize_cq - resize cq
+ * @ibcq: cq to be resized
+ * @entries: desired cq size
+ * @udata: user data
+ */
+static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
+ struct ib_udata *udata)
+{
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_cq_info *m_info;
+ struct irdma_modify_cq_info info = {};
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_mr *cqmr_buf;
+ struct irdma_pbl *iwpbl_buf;
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_cq_buf *cq_buf = NULL;
+ enum irdma_status_code status = 0;
+ unsigned long flags;
+ int ret;
+
+ iwdev = to_iwdev(ibcq->device);
+ rf = iwdev->rf;
+
+ if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE))
+ return -EOPNOTSUPP;
+
+ if (entries > rf->max_cqe)
+ return -EINVAL;
+
+ if (!iwcq->user_mode) {
+ entries++;
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+ }
+
+ info.cq_size = max(entries, 4);
+
+ if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
+ return 0;
+
+ if (udata) {
+ struct irdma_resize_cq_req req = {};
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ /* CQ resize not supported with legacy GEN_1 libi40iw */
+ if (ucontext->legacy_mode)
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_buf)
+ return -ENOMEM;
+
+ cqmr_buf = &iwpbl_buf->cq_mr;
+ if (iwpbl_buf->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
+ } else {
+ info.cq_pa = cqmr_buf->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode CQ resize */
+ int rsize;
+
+ rsize = info.cq_size * sizeof(struct irdma_cqe);
+		kmem_buf.size = ALIGN(rsize, 256);
+ kmem_buf.va = dma_alloc_coherent(dev->hw->device,
+ kmem_buf.size, &kmem_buf.pa,
+ GFP_KERNEL);
+ if (!kmem_buf.va)
+ return -ENOMEM;
+
+ info.cq_base = kmem_buf.va;
+ info.cq_pa = kmem_buf.pa;
+ cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
+ if (!cq_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
+ info.cq_resize = true;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.cq_modify.info;
+ memcpy(m_info, &info, sizeof(*m_info));
+
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
+ cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
+ cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
+ cqp_info->post_sq = 1;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status) {
+ ret = -EPROTO;
+ goto error;
+ }
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (cq_buf) {
+ cq_buf->kmem_buf = iwcq->kmem;
+ cq_buf->hw = dev->hw;
+ memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
+ INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
+ list_add_tail(&cq_buf->list, &iwcq->resize_list);
+ iwcq->kmem = kmem_buf;
+ }
+
+ irdma_sc_cq_resize(&iwcq->sc_cq, &info);
+ ibcq->cqe = info.cq_size - 1;
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return 0;
+error:
+ if (!udata) {
+ dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
+ kmem_buf.pa);
+ kmem_buf.va = NULL;
+ }
+ kfree(cq_buf);
+
+ return ret;
+}
+
+static inline int cq_validate_flags(u32 flags, u8 hw_rev)
+{
+ /* GEN1 does not support CQ create flags */
+ if (hw_rev == IRDMA_GEN_1)
+ return flags ? -EOPNOTSUPP : 0;
+
+ return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * irdma_create_cq - create cq
+ * @ibcq: CQ allocated
+ * @attr: attributes for cq
+ * @udata: user data
+ */
+static int irdma_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ u32 cq_num = 0;
+ struct irdma_sc_cq *cq;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cq_init_info info = {};
+ enum irdma_status_code status;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+ unsigned long flags;
+ int err_code;
+ int entries = attr->cqe;
+
+ err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
+ if (err_code)
+ return err_code;
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
+ &rf->next_cq);
+ if (err_code)
+ return err_code;
+
+ cq = &iwcq->sc_cq;
+ cq->back_cq = iwcq;
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+ iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+ if (attr->comp_vector < rf->ceqs_count)
+ info.ceq_id = attr->comp_vector;
+ info.ceq_id_valid = true;
+ info.ceqe_mask = 1;
+ info.type = IRDMA_CQ_TYPE_IWARP;
+ info.vsi = &iwdev->vsi;
+
+ if (udata) {
+ struct irdma_ucontext *ucontext;
+ struct irdma_create_cq_req req = {};
+ struct irdma_cq_mr *cqmr;
+ struct irdma_pbl *iwpbl;
+ struct irdma_pbl *iwpbl_shadow;
+ struct irdma_cq_mr *cqmr_shadow;
+
+ iwcq->user_mode = true;
+ ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen))) {
+ err_code = -EFAULT;
+ goto cq_free_rsrc;
+ }
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!iwpbl) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ iwcq->iwpbl = iwpbl;
+ iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_shadow = irdma_get_pbl(
+ (unsigned long)req.user_shadow_area,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_shadow) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+ iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+ } else {
+ info.shadow_area_pa = cqmr->shadow;
+ }
+ if (iwpbl->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+ } else {
+ info.cq_base_pa = cqmr->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode allocations */
+ int rsize;
+
+ if (entries > rf->max_cqe) {
+ err_code = -EINVAL;
+ goto cq_free_rsrc;
+ }
+
+ entries++;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+ ukinfo->cq_size = entries;
+
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+		iwcq->kmem.size = ALIGN(rsize, 256);
+ iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem.size,
+ &iwcq->kmem.pa, GFP_KERNEL);
+ if (!iwcq->kmem.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
+ 64);
+ iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem_shadow.size,
+ &iwcq->kmem_shadow.pa,
+ GFP_KERNEL);
+ if (!iwcq->kmem_shadow.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+ info.shadow_area_pa = iwcq->kmem_shadow.pa;
+ ukinfo->shadow_area = iwcq->kmem_shadow.va;
+ ukinfo->cq_base = iwcq->kmem.va;
+ info.cq_base_pa = iwcq->kmem.pa;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
+
+ if (irdma_sc_cq_init(cq, &info)) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.check_overflow = true;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ if (udata) {
+ struct irdma_create_cq_resp resp = {};
+
+ resp.cq_id = info.cq_uk_init_info.cq_id;
+ resp.cq_size = info.cq_uk_init_info.cq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy to user data\n");
+ err_code = -EPROTO;
+ goto cq_destroy;
+ }
+ }
+ return 0;
+cq_destroy:
+ irdma_cq_wq_destroy(rf, cq);
+cq_free_rsrc:
+ irdma_cq_free_rsrc(rf, iwcq);
+
+ return err_code;
+}
+
+/**
+ * irdma_get_mr_access - get hw MR access permissions from IB access flags
+ * @access: IB access flags
+ */
+static inline u16 irdma_get_mr_access(int access)
+{
+ u16 hw_access = 0;
+
+ hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
+ IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
+ IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ hw_access |= (access & IB_ZERO_BASED) ?
+ IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
+ hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
+
+ return hw_access;
+}
+
+/**
+ * irdma_free_stag - free stag resource
+ * @iwdev: irdma device
+ * @stag: stag to free
+ */
+static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
+{
+ u32 stag_idx;
+
+ stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
+}
+
+/**
+ * irdma_create_stag - create random stag
+ * @iwdev: irdma device
+ */
+static u32 irdma_create_stag(struct irdma_device *iwdev)
+{
+ u32 stag = 0;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 random;
+ u8 consumer_key;
+ int ret;
+
+ get_random_bytes(&random, sizeof(random));
+ consumer_key = (u8)random;
+
+ driver_key = random & ~iwdev->rf->mr_stagmask;
+ next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
+ next_stag_index %= iwdev->rf->max_mr;
+
+ ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr, &stag_index,
+ &next_stag_index);
+ if (ret)
+ return stag;
+ stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
+ stag |= driver_key;
+ stag += (u32)consumer_key;
+
+ return stag;
+}
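+
+/*
+ * A stag is assembled from three pieces of one random draw: the
+ * allocated resource index shifted up by IRDMA_CQPSQ_STAG_IDX_S, the
+ * random driver-key bits falling outside mr_stagmask, and a random low
+ * consumer byte. E.g. (with an illustrative shift of 8) stag_index
+ * 0x12 becomes 0x1200, the driver key is OR'ed in, and consumer_key
+ * 0xab is added. A return value of 0 signals allocation failure.
+ */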
+
+/**
+ * irdma_next_pbl_addr - Get next pbl address
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
+ u32 *idx)
+{
+ *idx += 1;
+ if (!(*pinfo) || *idx != (*pinfo)->cnt)
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+
+ return (*pinfo)->addr;
+}
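+
+/*
+ * Walks a possibly chunked pble list: within a chunk the next entry is
+ * simply pbl + 1; once idx hits the chunk's cnt, the walk hops to the
+ * next irdma_pble_info and restarts at its first address. For level-1
+ * allocations the callers pass pinfo == NULL, so the list is treated
+ * as one flat array.
+ */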
+
+/**
+ * irdma_copy_user_pgaddrs - copy user page addresses into the local pbles
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pbl pointer to save 1 level or 0 level pble
+ * @level: indicates level 0, 1 or 2
+ */
+static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
+ enum irdma_pble_level level)
+{
+ struct ib_umem *region = iwmr->region;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+ u32 pbl_cnt = 0;
+
+ pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
+
+ if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
+ iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+
+ rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
+ *pbl = rdma_block_iter_dma_address(&biter);
+ if (++pbl_cnt == palloc->total_cnt)
+ break;
+ pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
+ }
+}
+
+/**
+ * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ */
+static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+ u32 pg_idx;
+
+ for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+ if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ return false;
+ }
+
+ return true;
+}
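+
+/*
+ * Contiguous here means every entry equals the first address plus its
+ * index times the page size: with pg_size = 0x1000 the array
+ * { 0x100000, 0x101000, 0x102000 } passes, while { 0x100000, 0x103000 }
+ * fails at pg_idx = 1. A contiguous region can then be programmed with
+ * a single base address instead of a pble index (see the callers below).
+ */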
+
+/**
+ * irdma_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
+ u32 pg_size)
+{
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+ u64 *arr = NULL;
+ u64 *start_addr = NULL;
+ int i;
+ bool ret;
+
+ if (palloc->level == PBLE_LEVEL_1) {
+ arr = palloc->level1.addr;
+ ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
+ pg_size);
+ return ret;
+ }
+
+ start_addr = leaf->addr;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ arr = leaf->addr;
+ if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+ return false;
+ ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * irdma_setup_pbles - copy user pg address to pble's
+ * @rf: RDMA PCI function
+ * @iwmr: mr pointer for this memory registration
+ * @use_pbles: flag indicating whether to use pbles
+ */
+static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+ bool use_pbles)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ u64 *pbl;
+ enum irdma_status_code status;
+ enum irdma_pble_level level = PBLE_LEVEL_1;
+
+ if (use_pbles) {
+ status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
+ false);
+ if (status)
+ return -ENOMEM;
+
+ iwpbl->pbl_allocated = true;
+ level = palloc->level;
+ pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
+ palloc->level2.leaf;
+ pbl = pinfo->addr;
+ } else {
+ pbl = iwmr->pgaddrmem;
+ }
+
+ irdma_copy_user_pgaddrs(iwmr, pbl, level);
+
+ if (use_pbles)
+ iwmr->pgaddrmem[0] = *pbl;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_q_mem - handle memory for qp and cq
+ * @iwdev: irdma device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @use_pbles: flag indicating whether to use pbles
+ */
+static int irdma_handle_q_mem(struct irdma_device *iwdev,
+ struct irdma_mem_reg_req *req,
+ struct irdma_pbl *iwpbl, bool use_pbles)
+{
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_mr *iwmr = iwpbl->iwmr;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+ struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_hmc_pble *hmc_p;
+ u64 *arr = iwmr->pgaddrmem;
+ u32 pg_size, total;
+ int err = 0;
+ bool ret = true;
+
+ pg_size = iwmr->page_size;
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ if (err)
+ return err;
+
+ if (use_pbles && palloc->level != PBLE_LEVEL_1) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ return -ENOMEM;
+ }
+
+ if (use_pbles)
+ arr = palloc->level1.addr;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ total = req->sq_pages + req->rq_pages;
+ hmc_p = &qpmr->sq_pbl;
+ qpmr->shadow = (dma_addr_t)arr[total];
+
+ if (use_pbles) {
+ ret = irdma_check_mem_contiguous(arr, req->sq_pages,
+ pg_size);
+ if (ret)
+ ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
+ req->rq_pages,
+ pg_size);
+ }
+
+ if (!ret) {
+ hmc_p->idx = palloc->level1.idx;
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->idx = palloc->level1.idx + req->sq_pages;
+ } else {
+ hmc_p->addr = arr[0];
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->addr = arr[req->sq_pages];
+ }
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ hmc_p = &cqmr->cq_pbl;
+
+ if (!cqmr->split)
+ cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
+
+ if (use_pbles)
+ ret = irdma_check_mem_contiguous(arr, req->cq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
+ default:
+ ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
+ err = -EINVAL;
+ }
+
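+	/* A physically contiguous region needs no PBL, so release it */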
+ if (use_pbles && ret) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+
+ return err;
+}
+
+/**
+ * irdma_hw_alloc_mw - create the hw memory window
+ * @iwdev: irdma device
+ * @iwmr: pointer to memory window info
+ */
+static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
+{
+ struct irdma_mw_alloc_info *info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.mw_alloc.info;
+ memset(info, 0, sizeof(*info));
+ if (iwmr->ibmw.type == IB_MW_TYPE_1)
+ info->mw_wide = true;
+
+ info->page_size = PAGE_SIZE;
+ info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status ? -ENOMEM : 0;
+}
+
+/**
+ * irdma_alloc_mw - Allocate memory window
+ * @ibmw: Memory Window
+ * @udata: user data pointer
+ */
+static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_mr *iwmr = to_iwmw(ibmw);
+ int err_code;
+ u32 stag;
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag)
+ return -ENOMEM;
+
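+	/*
+	 * The stag doubles as the MW rkey; irdma_hw_alloc_mw() extracts
+	 * the HW stag table index from it via IRDMA_CQPSQ_STAG_IDX_S.
+	 */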
+ iwmr->stag = stag;
+ ibmw->rkey = stag;
+
+ err_code = irdma_hw_alloc_mw(iwdev, iwmr);
+ if (err_code) {
+ irdma_free_stag(iwdev, stag);
+ return err_code;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_dealloc_mw - Dealloc memory window
+ * @ibmw: memory window structure.
+ */
+static int irdma_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ib_pd *ibpd = ibmw->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_dealloc_stag_info *info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+ info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = false;
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ irdma_free_stag(iwdev, iwmr->stag);
+
+ return 0;
+}
+
+/**
+ * irdma_hw_alloc_stag - cqp command to allocate stag
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ */
+static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_allocate_stag_info *info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ enum irdma_status_code status;
+ int err = 0;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.alloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->page_size = PAGE_SIZE;
+ info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->total_len = iwmr->len;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ err = -ENOMEM;
+
+ return err;
+}
+
+/**
+ * irdma_alloc_mr - register stag for fast memory registration
+ * @pd: ibpd pointer
+ * @mr_type: memory region type for stag registration
+ * @max_num_sg: max number of pages
+ */
+static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pble_alloc *palloc;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ enum irdma_status_code status;
+ u32 stag;
+ int err_code = -ENOMEM;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err_code = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->page_cnt = max_num_sg;
+ status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ true);
+ if (status)
+ goto err_get_pble;
+
+ err_code = irdma_hw_alloc_stag(iwdev, iwmr);
+ if (err_code)
+ goto err_alloc_stag;
+
+ iwpbl->pbl_allocated = true;
+
+ return &iwmr->ibmr;
+err_alloc_stag:
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+err_get_pble:
+ irdma_free_stag(iwdev, stag);
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(err_code);
+}
+
+/**
+ * irdma_set_page - populate pbl list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @addr: page dma address for the pbl list
+ */
+static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ u64 *pbl;
+
+ if (unlikely(iwmr->npages == iwmr->page_cnt))
+ return -ENOMEM;
+
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages++] = addr;
+
+ return 0;
+}
+
+/**
+ * irdma_map_mr_sg - map an sg list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @sg: scatter gather list
+ * @sg_nents: number of sg pages
+ * @sg_offset: byte offset into the first sg entry
+ */
+static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+
+ iwmr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
+}
+
+/**
+ * irdma_hwreg_mr - send cqp command for memory registration
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ * @access: access for MR
+ */
+static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ u16 access)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_reg_ns_stag_info *stag_info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ enum irdma_status_code status;
+ int err = 0;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+ memset(stag_info, 0, sizeof(*stag_info));
+ stag_info->va = iwpbl->user_base;
+ stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ stag_info->stag_key = (u8)iwmr->stag;
+ stag_info->total_len = iwmr->len;
+ stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->pd_id = iwpd->sc_pd.pd_id;
+ if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
+ stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ else
+ stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info->page_size = iwmr->page_size;
+
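+	/*
+	 * chunk_size indicates the PBL layout to the HW: 1 for a flat
+	 * level-1 PBL, 3 for the root of a level-2 PBL.
+	 */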
+ if (iwpbl->pbl_allocated) {
+ if (palloc->level == PBLE_LEVEL_1) {
+ stag_info->first_pm_pbl_index = palloc->level1.idx;
+ stag_info->chunk_size = 1;
+ } else {
+ stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+ stag_info->chunk_size = 3;
+ }
+ } else {
+ stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+ }
+
+ cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ err = -ENOMEM;
+
+ return err;
+}
+
+/**
+ * irdma_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @access: access of mr
+ * @udata: user data
+ */
+static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 virt, int access,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_ucontext *ucontext;
+ struct irdma_pble_alloc *palloc;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ struct ib_umem *region;
+ struct irdma_mem_reg_req req;
+ u32 total, stag = 0;
+ u8 shadow_pgcnt = 1;
+ bool use_pbles = false;
+ unsigned long flags;
+ int err = -EINVAL;
+ int ret;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ region = ib_umem_get(pd->device, start, len, access);
+
+ if (IS_ERR(region)) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: Failed to create ib_umem region\n");
+ return (struct ib_mr *)region;
+ }
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr) {
+ ib_umem_release(region);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->page_size = PAGE_SIZE;
+
+ if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
+ iwmr->page_size = ib_umem_find_best_pgsz(region,
+ SZ_4K | SZ_2M | SZ_1G,
+ virt);
+ if (unlikely(!iwmr->page_size)) {
+ kfree(iwmr);
+ ib_umem_release(region);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ }
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->type = req.reg_type;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+ switch (req.reg_type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt) {
+ err = -EINVAL;
+ goto error;
+ }
+ total = req.sq_pages + req.rq_pages;
+ use_pbles = (total > 2);
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ use_pbles = (req.cq_pages > 1);
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_MEM:
+ use_pbles = (iwmr->page_cnt != 1);
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ if (err)
+ goto error;
+
+ if (use_pbles) {
+ ret = irdma_check_mr_contiguous(palloc,
+ iwmr->page_size);
+ if (ret) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ err = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (err) {
+ irdma_free_stag(iwdev, stag);
+ goto error;
+ }
+
+ break;
+ default:
+ goto error;
+ }
+
+ iwmr->type = req.reg_type;
+
+ return &iwmr->ibmr;
+
+error:
+ if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ ib_umem_release(region);
+ kfree(iwmr);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * irdma_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @access: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
+ u64 *iova_start)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ enum irdma_status_code status;
+ u32 stag;
+ int ret;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ iwpbl->user_base = *iova_start;
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.iova = *iova_start;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->page_cnt = 1;
+ iwmr->pgaddrmem[0] = addr;
+ iwmr->len = size;
+ iwmr->page_size = SZ_4K;
+ status = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (status) {
+ irdma_free_stag(iwdev, stag);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return &iwmr->ibmr;
+
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * irdma_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ u64 kva = 0;
+
+ return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+}
+
+/**
+ * irdma_del_memlist - delete pbl list entries for CQ/QP
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void irdma_del_memlist(struct irdma_mr *iwmr,
+ struct irdma_ucontext *ucontext)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ unsigned long flags;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_CQ:
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_QP:
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ * @udata: user data
+ */
+static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct ib_pd *ibpd = ib_mr->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_dealloc_stag_info *info;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
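+	/*
+	 * QP and CQ registrations have no HW stag to deallocate; they
+	 * only need to be unlinked from the ucontext lists.
+	 */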
+ if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
+ if (iwmr->region) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext,
+ ibucontext);
+ irdma_del_memlist(iwmr, ucontext);
+ }
+ goto done;
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+ info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ irdma_free_stag(iwdev, iwmr->stag);
+done:
+ if (iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ ib_umem_release(iwmr->region);
+ kfree(iwmr);
+
+ return 0;
+}
+
+/**
+ * irdma_copy_sg_list - copy sg list for qp
+ * @sg_list: destination sg list
+ * @sgl: source ib sg list
+ * @num_sges: count of sg entries
+ */
+static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
+ int num_sges)
+{
+ unsigned int i;
+
+ for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
+ sg_list[i].tag_off = sgl[i].addr;
+ sg_list[i].len = sgl[i].length;
+ sg_list[i].stag = sgl[i].lkey;
+ }
+}
+
+/**
+ * irdma_post_send - post send wr for kernel application
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: returns the first failed wr on error
+ */
+static int irdma_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_sc_dev *dev;
+ struct irdma_post_sq_info info;
+ enum irdma_status_code ret;
+ int err = 0;
+ unsigned long flags;
+ bool inv_stag;
+ struct irdma_ah *ah;
+ bool reflush = false;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+ dev = &iwqp->iwdev->rf->sc_dev;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->flush_issued && ukqp->sq_flush_complete)
+ reflush = true;
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ inv_stag = false;
+ info.wr_id = (ib_wr->wr_id);
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ info.read_fence = true;
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (ib_wr->opcode == IB_WR_SEND ||
+ ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND_INV;
+ info.stag_to_inv = ib_wr->ex.invalidate_rkey;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_send.data = (void *)(unsigned long)
+ ib_wr->sg_list[0].addr;
+ info.op.inline_send.len = ib_wr->sg_list[0].length;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+ ret = irdma_uk_inline_send(ukqp, &info, false);
+ } else {
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct irdma_sge *)
+ ib_wr->sg_list;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+ ret = irdma_uk_send(ukqp, &info, false);
+ }
+
+ if (ret) {
+ if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_RDMA_WRITE:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
+ info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
+ } else {
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ ret = irdma_uk_rdma_write(ukqp, &info, false);
+ }
+
+ if (ret) {
+ if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ inv_stag = true;
+ fallthrough;
+ case IB_WR_RDMA_READ:
+ if (ib_wr->num_sge >
+ dev->hw_attrs.uk_attrs.max_hw_read_sges) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
+
+ ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
+ if (ret) {
+ if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
+ break;
+ case IB_WR_LOCAL_INV:
+ info.op_type = IRDMA_OP_TYPE_INV_STAG;
+ info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
+ ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
+ if (ret)
+ err = -ENOMEM;
+ break;
+ case IB_WR_REG_MR: {
+ struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
+ struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
+ struct irdma_fast_reg_stag_info stag_info = {};
+
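+			/*
+			 * The ib rkey packs an 8-bit consumer key in the low
+			 * byte and the stag table index in the remaining bits.
+			 */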
+ stag_info.signaled = info.signaled;
+ stag_info.read_fence = info.read_fence;
+ stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
+ stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
+ stag_info.wr_id = ib_wr->wr_id;
+ stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
+ stag_info.total_len = iwmr->ibmr.length;
+ stag_info.reg_addr_pa = *palloc->level1.addr;
+ stag_info.first_pm_pbl_index = palloc->level1.idx;
+ stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
+ if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
+ stag_info.chunk_size = 1;
+ ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
+ true);
+ if (ret)
+ err = -ENOMEM;
+ break;
+ }
+ default:
+ err = -EINVAL;
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: upost_send bad opcode = 0x%x\n",
+ ib_wr->opcode);
+ break;
+ }
+
+ if (err)
+ break;
+ ib_wr = ib_wr->next;
+ }
+
+ if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
+ irdma_uk_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ } else if (reflush) {
+ ukqp->sq_flush_complete = false;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: returns the first failed wr on error
+ */
+static int irdma_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_post_rq_info post_recv = {};
+ struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
+ enum irdma_status_code ret = 0;
+ unsigned long flags;
+ int err = 0;
+ bool reflush = false;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->flush_issued && ukqp->rq_flush_complete)
+ reflush = true;
+ while (ib_wr) {
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
+ post_recv.sg_list = sg_list;
+ ret = irdma_uk_post_receive(ukqp, &post_recv);
+ if (ret) {
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: post_recv err %d\n", ret);
+ if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ goto out;
+ }
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ if (reflush) {
+ ukqp->rq_flush_complete = false;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_flush_err_to_ib_wc_status - convert an iwarp flush error code to an IB wc status
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IB_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IB_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * irdma_process_cqe - process cqe info
+ * @entry: processed cqe
+ * @cq_poll_info: cqe info
+ */
+static void irdma_process_cqe(struct ib_wc *entry,
+ struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_sc_qp *qp;
+
+ entry->wc_flags = 0;
+ entry->pkey_index = 0;
+ entry->wr_id = cq_poll_info->wr_id;
+
+ qp = cq_poll_info->qp_handle;
+ iwqp = qp->qp_uk.back_qp;
+ entry->qp = qp->qp_uk.back_qp;
+
+ if (cq_poll_info->error) {
+ entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
+
+ entry->vendor_err = cq_poll_info->major_err << 16 |
+ cq_poll_info->minor_err;
+ } else {
+ entry->status = IB_WC_SUCCESS;
+ if (cq_poll_info->imm_valid) {
+ entry->ex.imm_data = htonl(cq_poll_info->imm_data);
+ entry->wc_flags |= IB_WC_WITH_IMM;
+ }
+ if (cq_poll_info->ud_smac_valid) {
+ ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
+ entry->wc_flags |= IB_WC_WITH_SMAC;
+ }
+
+ if (cq_poll_info->ud_vlan_valid) {
+ entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ } else {
+ entry->sl = 0;
+ }
+ }
+
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ case IRDMA_OP_TYPE_REC_IMM:
+ case IRDMA_OP_TYPE_REC:
+ entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
+ cq_poll_info->stag_invalid_set) {
+ entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
+ entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ break;
+ default:
+ ibdev_err(&iwqp->iwdev->ibdev,
+ "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
+ entry->status = IB_WC_GENERAL_ERR;
+ return;
+ }
+
+ if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
+ entry->src_qp = cq_poll_info->ud_src_qpn;
+ entry->slid = 0;
+ entry->wc_flags |=
+ (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ entry->network_hdr_type = cq_poll_info->ipv4 ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ } else {
+ entry->src_qp = cq_poll_info->qp_id;
+ }
+
+ entry->byte_len = cq_poll_info->bytes_xfered;
+}
+
+/**
+ * irdma_poll_one - poll one entry of the CQ
+ * @ukcq: ukcq to poll
+ * @cur_cqe: current CQE info to be filled in
+ * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
+ *
+ * Returns the internal irdma device error code or 0 on success
+ */
+static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
+ struct irdma_cq_poll_info *cur_cqe,
+ struct ib_wc *entry)
+{
+ int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
+
+ if (ret)
+ return ret;
+
+ irdma_process_cqe(entry, cur_cqe);
+
+ return 0;
+}
+
+/**
+ * __irdma_poll_cq - poll cq for completion (kernel apps)
+ * @iwcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc objects to fill with completions
+ */
+static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *last_buf = NULL;
+ struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
+ struct irdma_cq_buf *cq_buf;
+ enum irdma_status_code ret;
+ struct irdma_device *iwdev;
+ struct irdma_cq_uk *ukcq;
+ bool cq_new_cqe = false;
+ int resized_bufs = 0;
+ int npolled = 0;
+
+ iwdev = to_iwdev(iwcq->ibcq.device);
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ /* go through the list of previously resized CQ buffers */
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = container_of(list_node, struct irdma_cq_buf, list);
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+ if (ret == IRDMA_ERR_Q_EMPTY)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == IRDMA_ERR_Q_DESTROYED) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ /* save the resized CQ buffer which received the last cqe */
+ if (cq_new_cqe)
+ last_buf = cq_buf;
+ cq_new_cqe = false;
+ }
+
+ /* check the current CQ for new cqes */
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+
+ if (ret == IRDMA_ERR_Q_EMPTY)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == IRDMA_ERR_Q_DESTROYED) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ if (cq_new_cqe)
+ /* all previous CQ resizes are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
+ else if (last_buf)
+ /* only CQ resizes up to the last_buf are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
+ if (resized_bufs)
+ /* report to the HW the number of complete CQ resizes */
+ irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
+
+ return npolled;
+error:
+ ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
+ __func__, ret);
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc objects to fill with completions
+ */
+static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *entry)
+{
+ struct irdma_cq *iwcq;
+ unsigned long flags;
+ int ret;
+
+ iwcq = to_iwcq(ibcq);
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ ret = __irdma_poll_cq(iwcq, num_entries, entry);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_req_notify_cq - arm cq kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int irdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct irdma_cq *iwcq;
+ struct irdma_cq_uk *ukcq;
+ unsigned long flags;
+ enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+
+ iwcq = to_iwcq(ibcq);
+ ukcq = &iwcq->sc_cq.cq_uk;
+ if (notify_flags == IB_CQ_SOLICITED)
+ cq_notify = IRDMA_CQ_COMPL_SOLICITED;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return 0;
+}
+
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static const char *const irdma_hw_stat_names[] = {
+ /* 32bit names */
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
+
+ /* 64bit names */
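+	/*
+	 * The 64-bit counters are stored after the 32-bit block in
+	 * stats->value[], hence the IRDMA_HW_STAT_INDEX_MAX_32 offset
+	 * on each index.
+	 */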
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4InOctets",
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4InPkts",
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip4OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6InOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6InPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "ip6OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "tcpInSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "tcpOutSegs",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaInv",
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "RxUDP",
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "TxUDP",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ "RxECNMrkd",
+};
+
+static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
+{
+ struct irdma_device *iwdev = to_iwdev(dev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
+ irdma_fw_major_ver(&iwdev->rf->sc_dev),
+ irdma_fw_minor_ver(&iwdev->rf->sc_dev));
+}
+
+/**
+ * irdma_alloc_hw_port_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
+ */
+static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
+ IRDMA_HW_STAT_INDEX_MAX_64;
+ unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+ BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
+ (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
+
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ lifespan);
+}
+
+/**
+ * irdma_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
+ */
+static int irdma_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, u32 port_num,
+ int index)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
+
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
+ irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
+ else
+ irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
+
+ memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
+
+ return stats->num_counters;
+}
+
+/**
+ * irdma_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
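+	/* For iWARP the port GID is derived from the netdev MAC address */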
+ memset(gid->raw, 0, sizeof(gid->raw));
+ ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * mcast_list_add - Add a new mcast item to list
+ * @rf: RDMA PCI function
+ * @new_elem: pointer to element to add
+ */
+static void mcast_list_add(struct irdma_pci_f *rf,
+ struct mc_table_list *new_elem)
+{
+ list_add(&new_elem->list, &rf->mc_qht_list.list);
+}
+
+/**
+ * mcast_list_del - Remove an mcast item from list
+ * @mc_qht_elem: pointer to mcast table list element
+ */
+static void mcast_list_del(struct mc_table_list *mc_qht_elem)
+{
+ if (mc_qht_elem)
+ list_del(&mc_qht_elem->list);
+}
+
+/**
+ * mcast_list_lookup_ip - Search mcast list for address
+ * @rf: RDMA PCI function
+ * @ip_mcast: pointer to mcast IP address
+ */
+static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
+ u32 *ip_mcast)
+{
+ struct mc_table_list *mc_qht_el;
+ struct list_head *pos, *q;
+
+ list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
+ mc_qht_el = list_entry(pos, struct mc_table_list, list);
+ if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
+ sizeof(mc_qht_el->mc_info.dest_ip)))
+ return mc_qht_el;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_mcast_cqp_op - perform a mcast cqp operation
+ * @iwdev: irdma device
+ * @mc_grp_ctx: mcast group info
+ * @op: operation
+ *
+ * returns error status
+ */
+static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
+ struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cqp_request *cqp_request;
+ enum irdma_status_code status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * irdma_mcast_mac - Get the multicast MAC for an IP address
+ * @ip_addr: IPv4 or IPv6 address
+ * @mac: pointer to result MAC address
+ * @ipv4: flag indicating IPv4 or IPv6
+ */
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
+{
+ u8 *ip = (u8 *)ip_addr;
+
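+	/*
+	 * Standard multicast MAC mappings: IPv4 puts the low 23 bits of
+	 * the group address behind 01:00:5E (RFC 1112); IPv6 puts the
+	 * low 32 bits behind 33:33 (RFC 2464). ip_addr is held in host
+	 * order, so ip[0] is the least significant byte.
+	 */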
+ if (ipv4) {
+ unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
+ 0x00, 0x00};
+
+ mac4[3] = ip[2] & 0x7F;
+ mac4[4] = ip[1];
+ mac4[5] = ip[0];
+ ether_addr_copy(mac, mac4);
+ } else {
+ unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
+ 0x00, 0x00};
+
+ mac6[2] = ip[3];
+ mac6[3] = ip[2];
+ mac6[4] = ip[1];
+ mac6[5] = ip[0];
+ ether_addr_copy(mac, mac6);
+ }
+}
+
+/**
+ * irdma_attach_mcast - attach a qp to a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ unsigned long flags;
+ u32 ip_addr[4] = {};
+ u32 mgn;
+ u32 no_mgs;
+ int ret = 0;
+ bool ipv4;
+ u16 vlan_id;
+ union {
+ struct sockaddr saddr;
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+ } sgid_addr;
+ unsigned char dmac[ETH_ALEN];
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
+ ipv4 = false;
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
+ ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, false);
+ } else {
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ipv4 = true;
+ vlan_id = irdma_get_vlan_ipv4(ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, true);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
+ ibqp->qp_num, ip_addr, dmac);
+ }
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ struct irdma_dma_mem *dma_mem_mc;
+
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
+ if (!mc_qht_elem)
+ return -ENOMEM;
+
+ mc_qht_elem->mc_info.ipv4_valid = ipv4;
+ memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
+ sizeof(mc_qht_elem->mc_info.dest_ip));
+ ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
+ &mgn, &rf->next_mcg);
+ if (ret) {
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_info.mgn = mgn;
+ dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
+ dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
+ IRDMA_HW_PAGE_SIZE);
+ dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
+ dma_mem_mc->size,
+ &dma_mem_mc->pa,
+ GFP_KERNEL);
+ if (!dma_mem_mc->va) {
+ irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
+ memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
+ sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
+ mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
+ mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
+ if (vlan_id < VLAN_N_VID)
+ mc_qht_elem->mc_grp_ctx.vlan_valid = true;
+ mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
+ mc_qht_elem->mc_grp_ctx.qs_handle =
+ iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
+ ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ } else {
+ if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
+ IRDMA_MAX_MGS_PER_CTX) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
+ irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+
+ /* Only if there is a change do we need to modify or create */
+ if (!no_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_CREATE);
+ } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ } else {
+ return 0;
+ }
+
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_detach_mcast - detach a qp from a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 ip_addr[4] = {};
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ int ret;
+ unsigned long flags;
+ union {
+ struct sockaddr saddr;
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+ } sgid_addr;
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ else
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: address not found MCG\n");
+ return 0;
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_DESTROY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed MC_DESTROY MCG\n");
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -EAGAIN;
+ }
+
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ } else {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed Modify MCG\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_ah - create address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: User data
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct irdma_pd *pd = to_iwpd(ibah->pd);
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct rdma_ah_attr *ah_attr = attr->ah_attr;
+ const struct ib_gid_attr *sgid_attr;
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_ah *sc_ah;
+ u32 ah_id = 0;
+ struct irdma_ah_info *ah_info;
+ struct irdma_create_ah_resp uresp;
+ union {
+ struct sockaddr saddr;
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+ } sgid_addr, dgid_addr;
+ int err;
+ u8 dmac[ETH_ALEN];
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,
+ &rf->next_ah);
+ if (err)
+ return err;
+
+ ah->pd = pd;
+ sc_ah = &ah->sc_ah;
+ sc_ah->ah_info.ah_idx = ah_id;
+ sc_ah->ah_info.vsi = &iwdev->vsi;
+ irdma_sc_init_ah(&rf->sc_dev, sc_ah);
+ ah->sgid_index = ah_attr->grh.sgid_index;
+ sgid_attr = ah_attr->grh.sgid_attr;
+ memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
+ ah->av.attrs = *ah_attr;
+ ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
+ ah->av.sgid_addr.saddr = sgid_addr.saddr;
+ ah->av.dgid_addr.saddr = dgid_addr.saddr;
+ ah_info = &sc_ah->ah_info;
+ ah_info->ah_idx = ah_id;
+ ah_info->pd_idx = pd->sc_pd.pd_id;
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ ah_info->flow_label = ah_attr->grh.flow_label;
+ ah_info->hop_ttl = ah_attr->grh.hop_limit;
+ ah_info->tc_tos = ah_attr->grh.traffic_class;
+ }
+
+ ether_addr_copy(dmac, ah_attr->roce.dmac);
+ if (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {
+ ah_info->ipv4_valid = true;
+ ah_info->dest_ip_addr[0] =
+ ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->src_ip_addr[0] =
+ ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
+ ah_info->dest_ip_addr[0]);
+ if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
+ }
+ } else {
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
+ ah_info->dest_ip_addr);
+ if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
+ }
+ }
+
+ err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
+ ah_info->mac_addr);
+ if (err)
+ goto error;
+
+ ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
+ ah_info->ipv4_valid, dmac);
+
+ if (ah_info->dst_arpindex == -1) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)
+ ah_info->vlan_tag = 0;
+
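+	/*
+	 * Fold the 802.1p priority derived from the ToS field into the
+	 * PCP bits of the VLAN tag.
+	 */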
+ if (ah_info->vlan_tag < VLAN_N_VID) {
+ ah_info->insert_vlan_tag = true;
+ ah_info->vlan_tag |=
+ rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
+ }
+
+ err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
+ attr->flags & RDMA_CREATE_AH_SLEEPABLE,
+ irdma_gsi_ud_qp_ah_cb, sc_ah);
+
+ if (err) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: CQP-OP Create AH fail");
+ goto error;
+ }
+
+ if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+
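+		/*
+		 * Non-sleepable callers cannot wait for the CQP event, so
+		 * poll the CCQ directly until the firmware marks the AH
+		 * valid or the timeout expires.
+		 */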
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ mdelay(1);
+ } while (!sc_ah->ah_info.ah_valid && --cnt);
+
+ if (!cnt) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: CQP create AH timed out");
+ err = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ if (udata) {
+ uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
+ err = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ }
+ return 0;
+
+error:
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
+
+ return err;
+}
+
+/**
+ * irdma_destroy_ah - Destroy address handle
+ * @ibah: pointer to address handle
+ * @ah_flags: flags for sleepable
+ */
+static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
+{
+ struct irdma_device *iwdev = to_iwdev(ibah->device);
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
+ false, NULL, ah);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
+ ah->sc_ah.ah_info.ah_idx);
+
+ return 0;
+}
+
+/**
+ * irdma_query_ah - Query address handle
+ * @ibah: pointer to address handle
+ * @ah_attr: address handle attributes
+ */
+static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ if (ah->av.attrs.ah_flags & IB_AH_GRH) {
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
+ ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
+ ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
+		ah_attr->grh.sgid_index = ah->sgid_index;
+ memcpy(&ah_attr->grh.dgid, &ah->dgid,
+ sizeof(ah_attr->grh.dgid));
+ }
+
+ return 0;
+}
+
+static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
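+/*
+ * Derive an EUI-64 node GUID from the netdev MAC: flip the
+ * universal/local bit of the first octet and insert 0xff,0xfe in the
+ * middle, per the modified EUI-64 convention.
+ */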
+static __be64 irdma_mac_to_guid(struct net_device *ndev)
+{
+ unsigned char *mac = ndev->dev_addr;
+ __be64 guid;
+ unsigned char *dst = (unsigned char *)&guid;
+
+ dst[0] = mac[0] ^ 2;
+ dst[1] = mac[1];
+ dst[2] = mac[2];
+ dst[3] = 0xff;
+ dst[4] = 0xfe;
+ dst[5] = mac[3];
+ dst[6] = mac[4];
+ dst[7] = mac[5];
+
+ return guid;
+}
+
+static const struct ib_device_ops irdma_roce_dev_ops = {
+ .attach_mcast = irdma_attach_mcast,
+ .create_ah = irdma_create_ah,
+ .create_user_ah = irdma_create_ah,
+ .destroy_ah = irdma_destroy_ah,
+ .detach_mcast = irdma_detach_mcast,
+ .get_link_layer = irdma_get_link_layer,
+ .get_port_immutable = irdma_roce_port_immutable,
+ .modify_qp = irdma_modify_qp_roce,
+ .query_ah = irdma_query_ah,
+ .query_pkey = irdma_query_pkey,
+};
+
+static const struct ib_device_ops irdma_iw_dev_ops = {
+ .modify_qp = irdma_modify_qp,
+ .get_port_immutable = irdma_iw_port_immutable,
+ .query_gid = irdma_query_gid,
+};
+
+static const struct ib_device_ops irdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IRDMA,
+ .uverbs_abi_ver = IRDMA_ABI_VER,
+
+ .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
+ .alloc_mr = irdma_alloc_mr,
+ .alloc_mw = irdma_alloc_mw,
+ .alloc_pd = irdma_alloc_pd,
+ .alloc_ucontext = irdma_alloc_ucontext,
+ .create_cq = irdma_create_cq,
+ .create_qp = irdma_create_qp,
+ .dealloc_driver = irdma_ib_dealloc_device,
+ .dealloc_mw = irdma_dealloc_mw,
+ .dealloc_pd = irdma_dealloc_pd,
+ .dealloc_ucontext = irdma_dealloc_ucontext,
+ .dereg_mr = irdma_dereg_mr,
+ .destroy_cq = irdma_destroy_cq,
+ .destroy_qp = irdma_destroy_qp,
+ .disassociate_ucontext = irdma_disassociate_ucontext,
+ .get_dev_fw_str = irdma_get_dev_fw_str,
+ .get_dma_mr = irdma_get_dma_mr,
+ .get_hw_stats = irdma_get_hw_stats,
+ .map_mr_sg = irdma_map_mr_sg,
+ .mmap = irdma_mmap,
+ .mmap_free = irdma_mmap_free,
+ .poll_cq = irdma_poll_cq,
+ .post_recv = irdma_post_recv,
+ .post_send = irdma_post_send,
+ .query_device = irdma_query_device,
+ .query_port = irdma_query_port,
+ .query_qp = irdma_query_qp,
+ .reg_user_mr = irdma_reg_user_mr,
+ .req_notify_cq = irdma_req_notify_cq,
+ .resize_cq = irdma_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
+};
+
+/**
+ * irdma_init_roce_device - initialization of roce rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_roce_device(struct irdma_device *iwdev)
+{
+ iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
+ iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+ ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
+}
+
+/**
+ * irdma_init_iw_device - initialization of iwarp rdma device
+ * @iwdev: irdma device
+ */
+static int irdma_init_iw_device(struct irdma_device *iwdev)
+{
+ struct net_device *netdev = iwdev->netdev;
+
+ iwdev->ibdev.node_type = RDMA_NODE_RNIC;
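+ /* iWARP exposes the interface MAC, copied verbatim, as the node GUID */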
+ ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+ iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
+ iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
+ iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
+ iwdev->ibdev.ops.iw_connect = irdma_connect;
+ iwdev->ibdev.ops.iw_accept = irdma_accept;
+ iwdev->ibdev.ops.iw_reject = irdma_reject;
+ iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
+ iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
+ memcpy(iwdev->ibdev.iw_ifname, netdev->name,
+ sizeof(iwdev->ibdev.iw_ifname));
+ ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
+
+ return 0;
+}
+
+/**
+ * irdma_init_rdma_device - initialization of rdma device
+ * @iwdev: irdma device
+ */
+static int irdma_init_rdma_device(struct irdma_device *iwdev)
+{
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+ int ret;
+
+ if (iwdev->roce_mode) {
+ irdma_init_roce_device(iwdev);
+ } else {
+ ret = irdma_init_iw_device(iwdev);
+ if (ret)
+ return ret;
+ }
+ iwdev->ibdev.phys_port_cnt = 1;
+ iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
+ iwdev->ibdev.dev.parent = &pcidev->dev;
+ ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
+
+ return 0;
+}
+
+/**
+ * irdma_port_ibevent - indicate port event
+ * @iwdev: irdma device
+ */
+void irdma_port_ibevent(struct irdma_device *iwdev)
+{
+ struct ib_event event;
+
+ event.device = &iwdev->ibdev;
+ event.element.port_num = 1;
+ event.event =
+ iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
+
+/**
+ * irdma_ib_unregister_device - unregister rdma device from IB core
+ * @iwdev: irdma device
+ */
+void irdma_ib_unregister_device(struct irdma_device *iwdev)
+{
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device(&iwdev->ibdev);
+}
+
+/**
+ * irdma_ib_register_device - register irdma device to IB core
+ * @iwdev: irdma device
+ */
+int irdma_ib_register_device(struct irdma_device *iwdev)
+{
+ int ret;
+
+ ret = irdma_init_rdma_device(iwdev);
+ if (ret)
+ return ret;
+
+ ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
+ if (ret)
+ goto error;
+ dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
+ ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
+ if (ret)
+ goto error;
+
+ iwdev->iw_status = 1;
+ irdma_port_ibevent(iwdev);
+
+ return 0;
+
+error:
+ ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
+
+ return ret;
+}
+
+/**
+ * irdma_ib_dealloc_device - deallocate device resources
+ * @ibdev: ib device
+ *
+ * Callback from ibdev dealloc_driver to deallocate resources
+ * under the irdma device
+ */
+void irdma_ib_dealloc_device(struct ib_device *ibdev)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
+ irdma_rt_deinit_hw(iwdev);
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ kfree(iwdev->rf);
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
new file mode 100644
index 000000000000..5c244cd321a3
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_VERBS_H
+#define IRDMA_VERBS_H
+
+#define IRDMA_MAX_SAVED_PHY_PGADDR 4
+
+#define IRDMA_PKEY_TBL_SZ 1
+#define IRDMA_DEFAULT_PKEY 0xFFFF
+
+struct irdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct irdma_device *iwdev;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ struct list_head cq_reg_mem_list;
+ spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
+ struct list_head qp_reg_mem_list;
+ spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ int abi_ver;
+ bool legacy_mode;
+};
+
+struct irdma_pd {
+ struct ib_pd ibpd;
+ struct irdma_sc_pd sc_pd;
+};
+
+struct irdma_av {
+ u8 macaddr[16];
+ struct rdma_ah_attr attrs;
+ union {
+ struct sockaddr saddr;
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+ } sgid_addr, dgid_addr;
+ u8 net_type;
+};
+
+struct irdma_ah {
+ struct ib_ah ibah;
+ struct irdma_sc_ah sc_ah;
+ struct irdma_pd *pd;
+ struct irdma_av av;
+ u8 sgid_index;
+ union ib_gid dgid;
+};
+
+struct irdma_hmc_pble {
+ union {
+ u32 idx;
+ dma_addr_t addr;
+ };
+};
+
+struct irdma_cq_mr {
+ struct irdma_hmc_pble cq_pbl;
+ dma_addr_t shadow;
+ bool split;
+};
+
+struct irdma_qp_mr {
+ struct irdma_hmc_pble sq_pbl;
+ struct irdma_hmc_pble rq_pbl;
+ dma_addr_t shadow;
+ struct page *sq_page;
+};
+
+struct irdma_cq_buf {
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_uk cq_uk;
+ struct irdma_hw *hw;
+ struct list_head list;
+ struct work_struct work;
+};
+
+struct irdma_pbl {
+ struct list_head list;
+ union {
+ struct irdma_qp_mr qp_mr;
+ struct irdma_cq_mr cq_mr;
+ };
+
+ bool pbl_allocated:1;
+ bool on_list:1;
+ u64 user_base;
+ struct irdma_pble_alloc pble_alloc;
+ struct irdma_mr *iwmr;
+};
+
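+/* An irdma_mr backs either an ib_mr or an ib_mw; both ib_core objects
+ * share this structure, hence the union below.
+ */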
+struct irdma_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+ struct ib_umem *region;
+ u16 type;
+ u32 page_cnt;
+ u64 page_size;
+ u32 npages;
+ u32 stag;
+ u64 len;
+ u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
+ struct irdma_pbl iwpbl;
+};
+
+struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ u32 polled_cmpls;
+ u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ spinlock_t lock; /* for poll cq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+};
+
+struct disconn_work {
+ struct work_struct work;
+ struct irdma_qp *iwqp;
+};
+
+struct iw_cm_id;
+
+struct irdma_qp_kmode {
+ struct irdma_dma_mem dma_mem;
+ struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
+ u64 *rq_wrid_mem;
+};
+
+struct irdma_qp {
+ struct ib_qp ibqp;
+ struct irdma_sc_qp sc_qp;
+ struct irdma_device *iwdev;
+ struct irdma_cq *iwscq;
+ struct irdma_cq *iwrcq;
+ struct irdma_pd *iwpd;
+ struct rdma_user_mmap_entry *push_wqe_mmap_entry;
+ struct rdma_user_mmap_entry *push_db_mmap_entry;
+ struct irdma_qp_host_ctx_info ctx_info;
+ union {
+ struct irdma_iwarp_offload_info iwarp_info;
+ struct irdma_roce_offload_info roce_info;
+ };
+
+ union {
+ struct irdma_tcp_offload_info tcp_info;
+ struct irdma_udp_offload_info udp_info;
+ };
+
+ struct irdma_ah roce_ah;
+ struct list_head teardown_entry;
+ refcount_t refcnt;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_node *cm_node;
+ struct ib_mr *lsmm_mr;
+ atomic_t hw_mod_qp_pend;
+ enum ib_qp_state ibqp_state;
+ u32 qp_mem_size;
+ u32 last_aeq;
+ int max_send_wr;
+ int max_recv_wr;
+ atomic_t close_timer_started;
+ spinlock_t lock; /* serialize posting WRs to SQ/RQ */
+ struct irdma_qp_context *iwqp_context;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ u8 active_conn : 1;
+ u8 user_mode : 1;
+ u8 hte_added : 1;
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
+ u8 hw_iwarp_state;
+ u8 hw_tcp_state;
+ struct irdma_qp_kmode kqp;
+ struct irdma_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+ struct irdma_pbl *iwpbl;
+ struct irdma_dma_mem q2_ctx_mem;
+ struct irdma_dma_mem ietf_mem;
+ struct completion free_qp;
+ wait_queue_head_t waitq;
+ wait_queue_head_t mod_qp_waitq;
+ u8 rts_ae_rcvd;
+};
+
+enum irdma_mmap_flag {
+ IRDMA_MMAP_IO_NC,
+ IRDMA_MMAP_IO_WC,
+};
+
+struct irdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 bar_offset;
+ u8 mmap_flag;
+};
+
+static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
+static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
+int irdma_ib_register_device(struct irdma_device *iwdev);
+void irdma_ib_unregister_device(struct irdma_device *iwdev);
+void irdma_ib_dealloc_device(struct ib_device *ibdev);
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
+#endif /* IRDMA_VERBS_H */
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
new file mode 100644
index 000000000000..b68c575eb78e
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "status.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+
+#include "ws.h"
+
+/**
+ * irdma_alloc_node - Allocate and initialize a WS node
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ * @node_type: Type of node, leaf or parent
+ * @parent: parent node pointer
+ */
+static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi,
+ u8 user_pri,
+ enum irdma_ws_node_type node_type,
+ struct irdma_ws_node *parent)
+{
+ struct irdma_virt_mem ws_mem;
+ struct irdma_ws_node *node;
+ u16 node_index = 0;
+
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
+ if (!ws_mem.va)
+ return NULL;
+
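+ /* Only child nodes consume a device WS node ID; the root keeps the
+ * default index of 0.
+ */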
+ if (parent) {
+ node_index = irdma_alloc_ws_node_id(vsi->dev);
+ if (node_index == IRDMA_WS_NODE_INVALID) {
+ kfree(ws_mem.va);
+ return NULL;
+ }
+ }
+
+ node = ws_mem.va;
+ node->index = node_index;
+ node->vsi_index = vsi->vsi_idx;
+ INIT_LIST_HEAD(&node->child_list_head);
+ if (node_type == WS_NODE_TYPE_LEAF) {
+ node->type_leaf = true;
+ node->traffic_class = vsi->qos[user_pri].traffic_class;
+ node->user_pri = user_pri;
+ node->rel_bw = vsi->qos[user_pri].rel_bw;
+ if (!node->rel_bw)
+ node->rel_bw = 1;
+
+ node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ } else {
+ node->rel_bw = 1;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ node->enable = true;
+ }
+
+ node->parent = parent;
+
+ return node;
+}
+
+/**
+ * irdma_free_node - Free a WS node
+ * @vsi: VSI structure of the device
+ * @node: Pointer to node to free
+ */
+static void irdma_free_node(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node)
+{
+ struct irdma_virt_mem ws_mem;
+
+ if (node->index)
+ irdma_free_ws_node_id(vsi->dev, node->index);
+
+ ws_mem.va = node;
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ kfree(ws_mem.va);
+}
+
+/**
+ * irdma_ws_cqp_cmd - Post CQP work scheduler node cmd
+ * @vsi: vsi pointer
+ * @node: pointer to node
+ * @cmd: add, remove or modify
+ */
+static enum irdma_status_code
+irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
+{
+ struct irdma_ws_node_info node_info = {};
+
+ node_info.id = node->index;
+ node_info.vsi = node->vsi_index;
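+ /* the root of the WS tree reports itself as its own parent */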
+ if (node->parent)
+ node_info.parent_id = node->parent->index;
+ else
+ node_info.parent_id = node_info.id;
+
+ node_info.weight = node->rel_bw;
+ node_info.tc = node->traffic_class;
+ node_info.prio_type = node->prio_type;
+ node_info.type_leaf = node->type_leaf;
+ node_info.enable = node->enable;
+ if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
+ return IRDMA_ERR_NO_MEMORY;
+ }
+
+ if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
+ node->qs_handle = node_info.qs_handle;
+ vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
+ }
+
+ return 0;
+}
+
+/**
+ * ws_find_node - Find SC WS node based on VSI id or TC
+ * @parent: parent node whose children are searched
+ * @match_val: value to match
+ * @type: match type VSI/TC
+ */
+static struct irdma_ws_node *ws_find_node(struct irdma_ws_node *parent,
+ u16 match_val,
+ enum irdma_ws_match_type type)
+{
+ struct irdma_ws_node *node;
+
+ switch (type) {
+ case WS_MATCH_TYPE_VSI:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->vsi_index == match_val)
+ return node;
+ }
+ break;
+ case WS_MATCH_TYPE_TC:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->traffic_class == match_val)
+ return node;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_tc_in_use - Check whether a leaf node is in use
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static bool irdma_tc_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ int i;
+
+ mutex_lock(&vsi->qos[user_pri].qos_mutex);
+ if (!list_empty(&vsi->qos[user_pri].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+
+ /* Check if the traffic class associated with the given user priority
+ * is in use by any other user priority. If so, there is nothing left to do.
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class &&
+ !list_empty(&vsi->qos[i].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+ }
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+
+ return false;
+}
+
+/**
+ * irdma_remove_leaf - Remove leaf node unconditionally
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node;
+ int i;
+ u16 traffic_class;
+
+ traffic_class = vsi->qos[user_pri].traffic_class;
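+ /* Invalidate every user priority that maps to this traffic class */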
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ if (vsi->qos[i].traffic_class == traffic_class)
+ vsi->qos[i].valid = false;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root)
+ return;
+
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+ if (!vsi_node)
+ return;
+
+ tc_node = ws_find_node(vsi_node,
+ vsi->qos[user_pri].traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node)
+ return;
+
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ vsi->unregister_qset(vsi, tc_node);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+ /* Check if VSI node can be freed */
+ if (list_empty(&vsi_node->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+ /* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root,
+ IRDMA_OP_WS_DELETE_NODE);
+ irdma_free_node(vsi, ws_tree_root);
+ vsi->dev->ws_tree_root = NULL;
+ }
+ }
+}
+
+/**
+ * irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root;
+ struct irdma_ws_node *vsi_node;
+ struct irdma_ws_node *tc_node;
+ u16 traffic_class;
+ enum irdma_status_code ret = 0;
+ int i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (vsi->tc_change_pending) {
+ ret = IRDMA_ERR_NOT_READY;
+ goto exit;
+ }
+
+ if (vsi->qos[user_pri].valid)
+ goto exit;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: Creating root node\n");
+ ws_tree_root = irdma_alloc_node(vsi, user_pri,
+ WS_NODE_TYPE_PARENT, NULL);
+ if (!ws_tree_root) {
+ ret = IRDMA_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, ws_tree_root);
+ goto exit;
+ }
+
+ vsi->dev->ws_tree_root = ws_tree_root;
+ }
+
+ /* Find a second tier node that matches the VSI */
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+
+ /* If VSI node doesn't exist, add one */
+ if (!vsi_node) {
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d\n",
+ vsi->vsi_idx);
+ vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
+ ws_tree_root);
+ if (!vsi_node) {
+ ret = IRDMA_ERR_NO_MEMORY;
+ goto vsi_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, vsi_node);
+ goto vsi_add_err;
+ }
+
+ list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);
+ }
+
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d\n",
+ vsi_node->index, vsi->vsi_idx);
+ traffic_class = vsi->qos[user_pri].traffic_class;
+ tc_node = ws_find_node(vsi_node, traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node) {
+ /* Add leaf node */
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d and TC %d\n",
+ vsi->vsi_idx, traffic_class);
+ tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
+ vsi_node);
+ if (!tc_node) {
+ ret = IRDMA_ERR_NO_MEMORY;
+ goto leaf_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, tc_node);
+ goto leaf_add_err;
+ }
+
+ list_add(&tc_node->siblings, &vsi_node->child_list_head);
+ /*
+ * callback to LAN to update the LAN tree with our node
+ */
+ ret = vsi->register_qset(vsi, tc_node);
+ if (ret)
+ goto reg_err;
+
+ tc_node->enable = true;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
+ if (ret)
+ goto reg_err;
+ }
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d TC %d\n",
+ tc_node->index, vsi->vsi_idx, traffic_class);
+ /*
+ * Iterate through other UPs and update the QS handle if they have
+ * a matching traffic class.
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == traffic_class) {
+ vsi->qos[i].qs_handle = tc_node->qs_handle;
+ vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
+ vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
+ vsi->qos[i].valid = true;
+ }
+ }
+ goto exit;
+
+leaf_add_err:
+ if (list_empty(&vsi_node->child_list_head)) {
+ if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
+ goto exit;
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+ }
+
+vsi_add_err:
+ /* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
+ vsi->dev->ws_tree_root = NULL;
+ irdma_free_node(vsi, ws_tree_root);
+ }
+
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+ return ret;
+
+reg_err:
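+ /* drop the lock first; irdma_ws_remove() takes ws_mutex itself */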
+ mutex_unlock(&vsi->dev->ws_mutex);
+ irdma_ws_remove(vsi, user_pri);
+ return ret;
+}
+
+/**
+ * irdma_ws_remove - Free WS scheduler node, update WS tree
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (irdma_tc_in_use(vsi, user_pri))
+ goto exit;
+ irdma_remove_leaf(vsi, user_pri);
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
+
+/**
+ * irdma_ws_reset - Reset entire WS tree
+ * @vsi: vsi pointer
+ */
+void irdma_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ u8 i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i)
+ irdma_remove_leaf(vsi, i);
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h
new file mode 100644
index 000000000000..f0e16f630701
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_WS_H
+#define IRDMA_WS_H
+
+#include "osdep.h"
+
+enum irdma_ws_node_type {
+ WS_NODE_TYPE_PARENT,
+ WS_NODE_TYPE_LEAF,
+};
+
+enum irdma_ws_match_type {
+ WS_MATCH_TYPE_VSI,
+ WS_MATCH_TYPE_TC,
+};
+
+struct irdma_ws_node {
+ struct list_head siblings;
+ struct list_head child_list_head;
+ struct irdma_ws_node *parent;
+ u64 lan_qs_handle; /* opaque handle used by LAN */
+ u32 l2_sched_node_id;
+ u16 index;
+ u16 qs_handle;
+ u16 vsi_index;
+ u8 traffic_class;
+ u8 user_pri;
+ u8 rel_bw;
+ u8 abstraction_layer; /* used for splitting a TC */
+ u8 prio_type;
+ bool type_leaf:1;
+ bool enable:1;
+};
+
+struct irdma_sc_vsi;
+enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_reset(struct irdma_sc_vsi *vsi);
+
+#endif /* IRDMA_WS_H */
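A rough sketch of how the work-scheduler API above is meant to be driven. This is editorial illustration, not part of the patch: the caller name is hypothetical, and the real consumers live in the irdma QP setup and teardown paths.

    /* Hypothetical caller: make sure a WS leaf exists for a user
     * priority, use its qs_handle, then release it when done.
     */
    static int example_ws_usage(struct irdma_sc_vsi *vsi, u8 user_pri)
    {
    	enum irdma_status_code status;

    	/* Builds the root -> VSI -> TC leaf chain as needed and
    	 * publishes vsi->qos[user_pri].qs_handle for WQE posting.
    	 */
    	status = irdma_ws_add(vsi, user_pri);
    	if (status)
    		return -ENODEV;

    	/* ... post work against vsi->qos[user_pri].qs_handle ... */

    	/* A no-op while any QP still uses this traffic class */
    	irdma_ws_remove(vsi, user_pri);
    	return 0;
    }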
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e9b5a4d57fb1..4cd738aae53c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -135,7 +135,7 @@ static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
-static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
struct mlx4_ib_cq_buf *buf,
struct ib_umem **umem, u64 buf_addr, int cqe)
{
@@ -210,7 +210,7 @@ int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
buf_addr = (void *)(unsigned long)ucmd.buf_addr;
- err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
+ err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem,
ucmd.buf_addr, entries);
if (err)
goto err_cq;
@@ -327,8 +327,8 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (!cq->resize_buf)
return -ENOMEM;
- err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
- &cq->resize_umem, ucmd.buf_addr, entries);
+ err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem,
+ ucmd.buf_addr, entries);
if (err) {
kfree(cq->resize_buf);
cq->resize_buf = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 230a6ae0ab5a..ae4c91b612ce 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2099,17 +2099,29 @@ static const struct diag_counter diag_device_only[] = {
DIAG_COUNTER(rq_num_udsdprd, 0x118),
};
-static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
+static struct rdma_hw_stats *
+mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[!!port_num].name)
+ if (!diag[0].name)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
- diag[!!port_num].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static struct rdma_hw_stats *
+mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
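+ /* diag[1] holds the per-port counter set; diag[0] is device-wide */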
+ if (!diag[1].name)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -2200,7 +2212,8 @@ static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
}
static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
- .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
+ .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+ .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats,
.get_hw_stats = mlx4_ib_get_hw_stats,
};
@@ -2528,6 +2541,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.destroy_qp = mlx4_ib_destroy_qp,
.destroy_srq = mlx4_ib_destroy_srq,
.detach_mcast = mlx4_ib_mcg_detach,
+ .device_group = &mlx4_attr_group,
.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
.drain_rq = mlx4_ib_drain_rq,
.drain_sq = mlx4_ib_drain_sq,
@@ -2787,7 +2801,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
- rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
&dev->persist->pdev->dev))
goto err_diag_counters;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 92ddbcc00eb2..4a2ef7daaded 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3144,7 +3144,7 @@ static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
mlx->sched_prio = cpu_to_be16(pcp);
ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
- memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
+ ether_addr_copy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac);
memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
@@ -4251,13 +4251,8 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
if (wq_attr_mask & IB_WQ_FLAGS)
return -EOPNOTSUPP;
- cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
- ibwq->state;
- new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
-
- if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
- new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
- return -EINVAL;
+ cur_state = wq_attr->curr_wq_state;
+ new_state = wq_attr->wq_state;
if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index e365341057cb..224ba36f2946 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -161,22 +161,29 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
return cnts->set_id;
}
-static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct mlx5_ib_counters *cnts;
- bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
+ const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
- if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
- return NULL;
+ return rdma_alloc_hw_stats_struct(cnts->names,
+ cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
- cnts = get_counters(dev, port_num - 1);
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
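+ /* port_num is 1-based; per-port counters are stored 0-based */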
return rdma_alloc_hw_stats_struct(cnts->names,
cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -666,7 +673,17 @@ void mlx5_ib_counters_clear_description(struct ib_counters *counters)
}
static const struct ib_device_ops hw_stats_ops = {
- .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
+ .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .counter_bind_qp = mlx5_ib_counter_bind_qp,
+ .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+ .counter_dealloc = mlx5_ib_counter_dealloc,
+ .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+ .counter_update_stats = mlx5_ib_counter_update_stats,
+};
+
+static const struct ib_device_ops hw_switchdev_stats_ops = {
+ .alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats,
.get_hw_stats = mlx5_ib_get_hw_stats,
.counter_bind_qp = mlx5_ib_counter_bind_qp,
.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
@@ -690,7 +707,10 @@ int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
return 0;
- ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
+ if (is_mdev_switchdev_mode(dev->mdev))
+ ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
+ else
+ ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
return mlx5_ib_alloc_counters(dev);
}
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9ce01f729673..7abeb576b3c5 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -227,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
- if (unlikely(is_qp1(qp->ibqp.qp_type))) {
+ if (is_qp1(qp->type)) {
u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
@@ -725,7 +725,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return -EFAULT;
if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
- MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
+ MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
return -EINVAL;
if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
@@ -750,7 +751,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
goto err_umem;
}
- err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
+ err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
if (err)
goto err_umem;
@@ -826,6 +827,9 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
+
MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 7af4df7a6823..9ca2e61807ec 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -44,8 +44,7 @@ struct mlx5_ib_user_db_page {
struct mm_struct *mm;
};
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db)
{
struct mlx5_ib_user_db_page *page;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 644d5d0ac544..094c976b1eed 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1816,7 +1816,17 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
if (MLX5_CAP_GEN(dev->mdev, ece_support))
resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+ if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
+ rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
+ rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
+
resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
+
+ if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
+ resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+
return 0;
}
@@ -3178,8 +3188,6 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
port->mp.mpi = NULL;
- list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
-
spin_unlock(&port->mp.mpi_lock);
err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
@@ -3327,7 +3335,10 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
} else {
mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
i + 1);
- mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ list_add_tail(&dev->port[i].mp.mpi->list,
+ &mlx5_ib_unaffiliated_port_list);
+ mlx5_ib_unbind_slave_port(dev,
+ dev->port[i].mp.mpi);
}
}
}
@@ -3738,6 +3749,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .device_group = &mlx5_attr_group,
.enable_driver = mlx5_ib_enable_driver,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
@@ -4025,7 +4037,6 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
const char *name;
- rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
if (!mlx5_lag_is_roce(dev->mdev))
name = "mlx5_%d";
else
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e9a3f34a30b8..585fb00bdce8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -512,7 +512,6 @@ struct mlx5_ib_qp {
/*
* IB/core doesn't store low-level QP types, so
* store both MLX and IBTA types in the field below.
- * IB_QPT_DRIVER will be break to DCI/DCT subtypes.
*/
enum ib_qp_type type;
/* A flag to indicate if there's a new counter is configured
@@ -550,6 +549,7 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
enum mlx5_ib_cq_pr_flags {
MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+ MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};
struct mlx5_ib_cq {
@@ -1198,8 +1198,7 @@ to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
struct mlx5_user_mmap_entry, rdma_entry);
}
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
- struct ib_udata *udata, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
@@ -1265,7 +1264,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
- struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
@@ -1614,4 +1612,10 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}
+
+static inline bool rt_supported(int ts_cap)
+{
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 425423dfac72..3263851ea574 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -68,6 +68,7 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev);
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
@@ -77,10 +78,10 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
MLX5_SET(mkc, mkc, relaxed_ordering_write,
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
+ (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
MLX5_SET(mkc, mkc, relaxed_ordering_read,
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
+ (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
@@ -811,7 +812,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, length64, 1);
- set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
+ set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
+ pd);
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
@@ -1510,7 +1512,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
- mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
if (IS_ERR(mr))
return ERR_CAST(mr);
return &mr->ibmr;
@@ -2010,7 +2012,7 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
/* This is only used from the kernel, so setting the PD is OK. */
- set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
+ set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1338c11fd121..d0d98e584ebc 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -418,7 +418,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
if (IS_ERR(odp))
return ERR_CAST(odp);
- ret = mr = mlx5_mr_cache_alloc(
+ mr = mlx5_mr_cache_alloc(
mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
if (IS_ERR(mr)) {
ib_umem_odp_release(odp);
@@ -478,7 +478,6 @@ out_mr:
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
- struct ib_udata *udata,
int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
@@ -1096,7 +1095,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK;
- if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
+ if (qp->type == IB_QPT_XRC_INI)
*wqe += sizeof(struct mlx5_wqe_xrc_seg);
if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9282eb10bfae..a77db29f8391 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -835,7 +835,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
offset);
- err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
+ err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_umem;
@@ -961,7 +961,7 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
@@ -1173,69 +1173,79 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
sq->flow_rule = NULL;
}
-static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+static bool fr_supported(int ts_cap)
{
- bool fr_supported =
- MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
- MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
- MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
- MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
- if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
- if (!fr_supported) {
- mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+static int get_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ bool fr_sup, bool rt_sup)
+{
+ if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) {
+ if (!rt_sup) {
+ mlx5_ib_dbg(dev,
+ "Real time TS format is not supported\n");
return -EOPNOTSUPP;
}
- return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+ return MLX5_TIMESTAMP_FORMAT_REAL_TIME;
}
- return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
- MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_sup) {
+ mlx5_ib_dbg(dev,
+ "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
+{
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
+
+ return get_ts_format(dev, recv_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
}
static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
{
- bool fr_supported =
- MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
- MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
- MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
- MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
- if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
- if (!fr_supported) {
- mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
- return -EOPNOTSUPP;
- }
- return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
- }
- return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
- MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+ return get_ts_format(dev, send_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
}
static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq)
{
- bool fr_supported =
- MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
- MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
- MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
- MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
- int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
- MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
-
- if (recv_cq &&
- recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
- ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
-
- if (send_cq &&
- send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
- ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
-
- if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
- !fr_supported) {
- mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
+ bool fr_sup = fr_supported(ts_cap);
+ bool rt_sup = rt_supported(ts_cap);
+ u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+ int send_ts_format =
+ send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :
+ default_ts;
+ int recv_ts_format =
+ recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :
+ default_ts;
+
+ if (send_ts_format < 0 || recv_ts_format < 0)
+ return -EOPNOTSUPP;
+
+ if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ send_ts_format != recv_ts_format) {
+ mlx5_ib_dbg(
+ dev,
+ "The send ts_format does not match the receive ts_format\n");
return -EOPNOTSUPP;
}
- return ts_format;
+
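+ /* Either CQ may sit at the default format; when exactly one side
+ * requests a non-default format, use that one for the QP.
+ */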
+ return send_ts_format == default_ts ? recv_ts_format : send_ts_format;
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
@@ -3089,7 +3099,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
- if (unlikely(qp->qp_type == IB_QPT_GSI))
+ if (mqp->type == IB_QPT_GSI)
return mlx5_ib_destroy_gsi(mqp);
if (mqp->type == MLX5_IB_QPT_DCT)
@@ -3128,7 +3138,7 @@ static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
- atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+ atomic_mode = get_atomic_mode(dev, qp->type);
if (atomic_mode < 0)
return -EOPNOTSUPP;
@@ -3300,10 +3310,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
ah->roce.dmac);
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ if ((qp->type == IB_QPT_RC ||
+ qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI ||
+ qp->type == IB_QPT_XRC_TGT) &&
(grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
(attr_mask & IB_QP_DEST_QPN))
mlx5_set_path_udp_sport(path, ah,
@@ -3342,7 +3352,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(ads, path, ack_timeout,
alt ? attr->alt_timeout : attr->timeout);
- if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
+ if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
sl & 0xf, qp->ibqp.pd);
@@ -3453,6 +3463,17 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RRE,
},
},
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ },
+ },
};
static int ib_nr_to_mlx5_nr(int ib_mask)
@@ -3848,6 +3869,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
[MLX5_QP_STATE_SQD] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP,
},
[MLX5_QP_STATE_SQER] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
@@ -3910,12 +3932,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
- if (is_sqp(ibqp->qp_type)) {
+ if (is_sqp(qp->type)) {
MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
MLX5_SET(qpc, qpc, log_msg_max, 8);
- } else if ((ibqp->qp_type == IB_QPT_UD &&
+ } else if ((qp->type == IB_QPT_UD &&
!(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
- ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
+ qp->type == MLX5_IB_QPT_REG_UMR) {
MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
MLX5_SET(qpc, qpc, log_msg_max, 12);
} else if (attr_mask & IB_QP_PATH_MTU) {
@@ -3941,7 +3963,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
/* todo implement counter_index functionality */
- if (is_sqp(ibqp->qp_type))
+ if (is_sqp(qp->type))
MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
if (attr_mask & IB_QP_PORT)
@@ -3969,7 +3991,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
goto out;
}
- get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
@@ -4048,7 +4070,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
optpar |= ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
@@ -4121,7 +4143,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET &&
- !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
+ !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
ibqp->srq ? to_msrq(ibqp->srq) : NULL);
if (send_cq != recv_cq)
@@ -4314,13 +4336,12 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
- struct mlx5_ib_qp *qp,
- enum ib_qp_type qp_type)
+ struct mlx5_ib_qp *qp)
{
if (dev->profile != &raw_eth_profile)
return true;
- if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
+ if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
return true;
/* Internal QP used for wc testing, with NOPs in wq */
@@ -4341,7 +4362,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
+ if (!mlx5_ib_modify_qp_allowed(dev, qp))
return -EOPNOTSUPP;
if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
@@ -4370,11 +4391,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ if (qp->type == IB_QPT_GSI)
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
- qp->type;
+ qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
if (qp_type == MLX5_IB_QPT_DCT)
return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
@@ -4395,7 +4415,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
!ib_modify_qp_is_ok(cur_state, new_state, qp_type,
attr_mask)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
- cur_state, new_state, ibqp->qp_type, attr_mask);
+ cur_state, new_state, qp->type, attr_mask);
goto out;
} else if (qp_type == MLX5_IB_QPT_DCI &&
!modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
@@ -4668,9 +4688,8 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
+ if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
@@ -4763,7 +4782,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (ibqp->rwq_ind_tbl)
return -ENOSYS;
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ if (qp->type == IB_QPT_GSI)
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
@@ -4777,7 +4796,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
mutex_lock(&qp->mutex);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
@@ -4804,7 +4823,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_send_sge = 0;
}
- qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->qp_type = qp->type;
qp_init_attr->recv_cq = ibqp->recv_cq;
qp_init_attr->send_cq = ibqp->send_cq;
qp_init_attr->srq = ibqp->srq;
@@ -5309,10 +5328,8 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
- curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
- wq_attr->curr_wq_state : wq->state;
- wq_state = (wq_attr_mask & IB_WQ_STATE) ?
- wq_attr->wq_state : curr_wq_state;
+ curr_wq_state = wq_attr->curr_wq_state;
+ wq_state = wq_attr->wq_state;
if (curr_wq_state == IB_WQS_ERR)
curr_wq_state = MLX5_RQC_STATE_ERR;
if (wq_state == IB_WQS_ERR)
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index c683d7000168..8844eacf2380 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -441,6 +441,12 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
break;
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ if (MBOX_ALLOC(mbox, sqd2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ break;
case MLX5_CMD_OP_INIT2INIT_QP:
if (MBOX_ALLOC(mbox, init2init_qp))
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index fab6736e4d6a..191c4ee7db62 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -84,7 +84,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
}
in->umem = srq->umem;
- err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
+ err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
goto err_umem;
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index cf2852cba45c..8841620af82f 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -866,7 +866,10 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
- /* Matches access in mlx5_set_umr_free_mkey() */
+ /* Matches access in mlx5_set_umr_free_mkey().
+ * Relaxed Ordering is set implicitly in mlx5_set_umr_free_mkey() and
+ * kernel ULPs are not aware of it, so we don't set it here.
+ */
if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
mlx5_ib_warn(
to_mdev(qp->ibqp.device),
@@ -1278,7 +1281,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_qp *qp;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf;
void *cur_edge;
@@ -1299,10 +1302,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
return -EIO;
}
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ if (qp->type == IB_QPT_GSI)
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
- qp = to_mqp(ibqp);
bf = &qp->bf;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1347,7 +1349,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
}
- switch (ibqp->qp_type) {
+ switch (qp->type) {
case IB_QPT_XRC_INI:
xrc = seg;
seg += sizeof(*xrc);
@@ -1476,7 +1478,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
return -EIO;
}
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ if (qp->type == IB_QPT_GSI)
return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
spin_lock_irqsave(&qp->rq.lock, flags);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 522bb606120e..adf4fcf0fee4 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1099,6 +1099,7 @@ static const struct ib_device_ops mthca_dev_ops = {
.destroy_cq = mthca_destroy_cq,
.destroy_qp = mthca_destroy_qp,
.detach_mcast = mthca_multicast_detach,
+ .device_group = &mthca_attr_group,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mthca_get_dma_mr,
.get_port_immutable = mthca_port_immutable,
@@ -1186,7 +1187,6 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
- rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 4882b3156edb..f329db0c591f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -161,6 +161,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.destroy_ah = ocrdma_destroy_ah,
.destroy_cq = ocrdma_destroy_cq,
.destroy_qp = ocrdma_destroy_qp,
+ .device_group = &ocrdma_attr_group,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = ocrdma_get_dma_mr,
.get_link_layer = ocrdma_link_layer,
@@ -218,7 +219,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);
- rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 8334a9850220..de98e0604f91 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -208,6 +208,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.destroy_cq = qedr_destroy_cq,
.destroy_qp = qedr_destroy_qp,
.destroy_srq = qedr_destroy_srq,
+ .device_group = &qedr_attr_group,
.get_dev_fw_str = qedr_get_dev_fw_str,
.get_dma_mr = qedr_get_dma_mr,
.get_link_layer = qedr_link_layer,
@@ -256,7 +257,6 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.num_comp_vectors = dev->num_cnq;
dev->ibdev.dev.parent = &dev->pdev->dev;
- rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 88497739029e..9363bccfc6e7 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -521,10 +521,6 @@ struct qib_pportdata {
struct qib_devdata *dd;
struct qib_chippport_specific *cpspec; /* chip-specific per-port */
- struct kobject pport_kobj;
- struct kobject pport_cc_kobj;
- struct kobject sl2vl_kobj;
- struct kobject diagc_kobj;
/* GUID for this interface, in network order */
__be64 guid;
@@ -1365,13 +1361,11 @@ static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
extern const char ib_qib_version[];
extern const struct attribute_group qib_attr_group;
+extern const struct attribute_group *qib_attr_port_groups[];
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
-int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
- struct kobject *kobj);
-void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 5e9e66f27064..d57e49de6650 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -32,25 +32,38 @@
* SOFTWARE.
*/
#include <linux/ctype.h>
+#include <rdma/ib_sysfs.h>
#include "qib.h"
#include "qib_mad.h"
-/* start of per-port functions */
+static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj)
+{
+ u32 port_num;
+ struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+
+ return &dd->pport[port_num - 1];
+}
+
/*
* Get/Set heartbeat enable. OR of 1=enabled, 2=auto
*/
-static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
+static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
- struct qib_devdata *dd = ppd->dd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
}
-static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
- size_t count)
+static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ const char *buf, size_t count)
{
- struct qib_devdata *dd = ppd->dd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
int ret;
u16 val;
@@ -70,11 +83,14 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
return ret < 0 ? ret : count;
}
+static IB_PORT_ATTR_RW(hrtbt_enable);
-static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
+static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
size_t count)
{
- struct qib_devdata *dd = ppd->dd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
int ret = count, r;
r = dd->f_set_ib_loopback(ppd, buf);
@@ -83,11 +99,14 @@ static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
return ret;
}
+static IB_PORT_ATTR_WO(loopback);
-static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
- size_t count)
+static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ const char *buf, size_t count)
{
- struct qib_devdata *dd = ppd->dd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
int ret;
u16 val;
@@ -100,14 +119,20 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
qib_set_led_override(ppd, val);
return count;
}
+static IB_PORT_ATTR_WO(led_override);
-static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
+static ssize_t status_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
+
if (!ppd->statusp)
return -EINVAL;
return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
}
+static IB_PORT_ATTR_RO(status);
/*
* For userland compatibility, these offsets must remain fixed.
@@ -127,8 +152,11 @@ static const char * const qib_status_str[] = {
NULL,
};
-static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
+static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
int i, any;
u64 s;
ssize_t ret;
@@ -160,38 +188,22 @@ static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
bail:
return ret;
}
+static IB_PORT_ATTR_RO(status_str);
/* end of per-port functions */
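The conversions above all rely on the IB_PORT_ATTR_* helpers from the new <rdma/ib_sysfs.h>. They behave like the generic __ATTR wrappers: each declares a struct ib_port_attribute named ib_port_attr_<name> wired to <name>_show/<name>_store, which is why the attribute arrays below can reference ib_port_attr_hrtbt_enable and friends directly. Approximately (a sketch of the expansion, not copied from the header):

#define IB_PORT_ATTR_RW(_name) \
	struct ib_port_attribute ib_port_attr_##_name = __ATTR_RW(_name)
#define IB_PORT_ATTR_RO(_name) \
	struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name)
#define IB_PORT_ATTR_WO(_name) \
	struct ib_port_attribute ib_port_attr_##_name = __ATTR_WO(_name)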
-/*
- * Start of per-port file structures and support code
- * Because we are fitting into other infrastructure, we have to supply the
- * full set of kobject/sysfs_ops structures and routines.
- */
-#define QIB_PORT_ATTR(name, mode, show, store) \
- static struct qib_port_attr qib_port_attr_##name = \
- __ATTR(name, mode, show, store)
-
-struct qib_port_attr {
- struct attribute attr;
- ssize_t (*show)(struct qib_pportdata *, char *);
- ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
+static struct attribute *port_linkcontrol_attributes[] = {
+ &ib_port_attr_loopback.attr,
+ &ib_port_attr_led_override.attr,
+ &ib_port_attr_hrtbt_enable.attr,
+ &ib_port_attr_status.attr,
+ &ib_port_attr_status_str.attr,
+ NULL
};
-QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
-QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
- store_hrtbt_enb);
-QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
-QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-
-static struct attribute *port_default_attributes[] = {
- &qib_port_attr_loopback.attr,
- &qib_port_attr_led_override.attr,
- &qib_port_attr_hrtbt_enable.attr,
- &qib_port_attr_status.attr,
- &qib_port_attr_status_str.attr,
- NULL
+static const struct attribute_group port_linkcontrol_group = {
+ .name = "linkcontrol",
+ .attrs = port_linkcontrol_attributes,
};
/*
@@ -201,13 +213,12 @@ static struct attribute *port_default_attributes[] = {
/*
* Congestion control table size followed by table entries
*/
-static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
{
+ struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_cc_kobj);
if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
return -EINVAL;
@@ -230,34 +241,19 @@ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
return count;
}
-
-static void qib_port_release(struct kobject *kobj)
-{
- /* nothing to do since memory is freed by qib_free_devdata() */
-}
-
-static struct kobj_type qib_port_cc_ktype = {
- .release = qib_port_release,
-};
-
-static const struct bin_attribute cc_table_bin_attr = {
- .attr = {.name = "cc_table_bin", .mode = 0444},
- .read = read_cc_table_bin,
- .size = PAGE_SIZE,
-};
+static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
* entries for the congestion entries - increase, timer, event log
* trigger threshold and the minimum injection rate delay.
*/
-static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
{
+ struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_cc_kobj);
if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
return -EINVAL;
@@ -278,67 +274,54 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static const struct bin_attribute cc_setting_bin_attr = {
- .attr = {.name = "cc_settings_bin", .mode = 0444},
- .read = read_cc_setting_bin,
- .size = PAGE_SIZE,
+static struct bin_attribute *port_ccmgta_attributes[] = {
+ &bin_attr_cc_setting_bin,
+ &bin_attr_cc_table_bin,
+ NULL,
};
-
-static ssize_t qib_portattr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
+static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int n)
{
- struct qib_port_attr *pattr =
- container_of(attr, struct qib_port_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_kobj);
-
- if (!pattr->show)
- return -EIO;
-
- return pattr->show(ppd, buf);
-}
+ struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
-static ssize_t qib_portattr_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t len)
-{
- struct qib_port_attr *pattr =
- container_of(attr, struct qib_port_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, pport_kobj);
-
- if (!pattr->store)
- return -EIO;
-
- return pattr->store(ppd, buf, len);
+ if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
+ return 0;
+ return attr->attr.mode;
}
-
-static const struct sysfs_ops qib_port_ops = {
- .show = qib_portattr_show,
- .store = qib_portattr_store,
-};
-
-static struct kobj_type qib_port_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_port_ops,
- .default_attrs = port_default_attributes
+static const struct attribute_group port_ccmgta_attribute_group = {
+ .name = "CCMgtA",
+ .is_bin_visible = qib_ccmgta_is_bin_visible,
+ .bin_attrs = port_ccmgta_attributes,
};
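The is_bin_visible hook replaces the old conditional kobject_init_and_add() sequence: the CCMgtA group is now always registered, and the callback hides both binary files by returning 0 whenever congestion control is unavailable. The same gating idiom in isolation (a generic sketch; feature_enabled() is a stand-in for the driver's real test):

static umode_t feature_is_bin_visible(struct kobject *kobj,
				      struct bin_attribute *attr, int n)
{
	if (!feature_enabled(kobj))
		return 0;		/* hide the file entirely */
	return attr->attr.mode;		/* otherwise keep the declared mode */
}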
/* Start sl2vl */
-#define QIB_SL2VL_ATTR(N) \
- static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sl = N \
- }
-
struct qib_sl2vl_attr {
- struct attribute attr;
+ struct ib_port_attribute attr;
int sl;
};
+static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct qib_sl2vl_attr *sattr =
+ container_of(attr, struct qib_sl2vl_attr, attr);
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+
+ return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+}
+
+#define QIB_SL2VL_ATTR(N) \
+ static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
+ .attr = __ATTR(N, 0444, sl2vl_attr_show, NULL), \
+ .sl = N, \
+ }
+
QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
@@ -356,72 +339,74 @@ QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);
-static struct attribute *sl2vl_default_attributes[] = {
- &qib_sl2vl_attr_0.attr,
- &qib_sl2vl_attr_1.attr,
- &qib_sl2vl_attr_2.attr,
- &qib_sl2vl_attr_3.attr,
- &qib_sl2vl_attr_4.attr,
- &qib_sl2vl_attr_5.attr,
- &qib_sl2vl_attr_6.attr,
- &qib_sl2vl_attr_7.attr,
- &qib_sl2vl_attr_8.attr,
- &qib_sl2vl_attr_9.attr,
- &qib_sl2vl_attr_10.attr,
- &qib_sl2vl_attr_11.attr,
- &qib_sl2vl_attr_12.attr,
- &qib_sl2vl_attr_13.attr,
- &qib_sl2vl_attr_14.attr,
- &qib_sl2vl_attr_15.attr,
+static struct attribute *port_sl2vl_attributes[] = {
+ &qib_sl2vl_attr_0.attr.attr,
+ &qib_sl2vl_attr_1.attr.attr,
+ &qib_sl2vl_attr_2.attr.attr,
+ &qib_sl2vl_attr_3.attr.attr,
+ &qib_sl2vl_attr_4.attr.attr,
+ &qib_sl2vl_attr_5.attr.attr,
+ &qib_sl2vl_attr_6.attr.attr,
+ &qib_sl2vl_attr_7.attr.attr,
+ &qib_sl2vl_attr_8.attr.attr,
+ &qib_sl2vl_attr_9.attr.attr,
+ &qib_sl2vl_attr_10.attr.attr,
+ &qib_sl2vl_attr_11.attr.attr,
+ &qib_sl2vl_attr_12.attr.attr,
+ &qib_sl2vl_attr_13.attr.attr,
+ &qib_sl2vl_attr_14.attr.attr,
+ &qib_sl2vl_attr_15.attr.attr,
NULL
};
-static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct qib_sl2vl_attr *sattr =
- container_of(attr, struct qib_sl2vl_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, sl2vl_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
-
- return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
-}
-
-static const struct sysfs_ops qib_sl2vl_ops = {
- .show = sl2vl_attr_show,
-};
-
-static struct kobj_type qib_sl2vl_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_sl2vl_ops,
- .default_attrs = sl2vl_default_attributes
+static const struct attribute_group port_sl2vl_group = {
+ .name = "sl2vl",
+ .attrs = port_sl2vl_attributes,
};
/* End sl2vl */
/* Start diag_counters */
-#define QIB_DIAGC_ATTR(N) \
- static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0664 }, \
- .counter = offsetof(struct qib_ibport, rvp.n_##N) \
- }
-
-#define QIB_DIAGC_ATTR_PER_CPU(N) \
- static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0664 }, \
- .counter = offsetof(struct qib_ibport, rvp.z_##N) \
- }
-
struct qib_diagc_attr {
- struct attribute attr;
+ struct ib_port_attribute attr;
size_t counter;
};
-QIB_DIAGC_ATTR_PER_CPU(rc_acks);
-QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
-QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);
+static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+
+ return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter));
+}
+
+static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+ *((u64 *)qibp + dattr->counter) = val;
+ return count;
+}
+
+#define QIB_DIAGC_ATTR(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
+ .counter = &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0, \
+ }
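The .counter initializer is pointer arithmetic on a null base: &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0 yields the field's offset measured in u64 elements rather than bytes, matching the *((u64 *)qibp + dattr->counter) indexing in the show/store callbacks above. It is a compile-time equivalent of the following (illustrative, assuming the counter fields are u64-aligned):

#include <linux/stddef.h>

/* byte offset scaled to u64 slots, shown for the rc_resends counter */
size_t counter_index = offsetof(struct qib_ibport, rvp.n_rc_resends)
		       / sizeof(u64);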
QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
@@ -437,26 +422,6 @@ QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);
-static struct attribute *diagc_default_attributes[] = {
- &qib_diagc_attr_rc_resends.attr,
- &qib_diagc_attr_rc_acks.attr,
- &qib_diagc_attr_rc_qacks.attr,
- &qib_diagc_attr_rc_delayed_comp.attr,
- &qib_diagc_attr_seq_naks.attr,
- &qib_diagc_attr_rdma_seq.attr,
- &qib_diagc_attr_rnr_naks.attr,
- &qib_diagc_attr_other_naks.attr,
- &qib_diagc_attr_rc_timeouts.attr,
- &qib_diagc_attr_loop_pkts.attr,
- &qib_diagc_attr_pkt_drops.attr,
- &qib_diagc_attr_dmawait.attr,
- &qib_diagc_attr_unaligned.attr,
- &qib_diagc_attr_rc_dupreq.attr,
- &qib_diagc_attr_rc_seqnak.attr,
- &qib_diagc_attr_rc_crwaits.attr,
- NULL
-};
-
static u64 get_all_cpu_total(u64 __percpu *cntr)
{
int cpu;
@@ -467,86 +432,127 @@ static u64 get_all_cpu_total(u64 __percpu *cntr)
return counter;
}
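get_all_cpu_total(), whose body sits mostly outside this hunk's context, sums a per-CPU counter across all possible CPUs; the z_* shadow fields then let a write of 0 be emulated by snapshotting the current total, since the per-CPU values themselves are never reset in place. The usual shape of such a helper (a sketch consistent with the call sites below, not the verbatim function):

static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	/* racy-but-tolerated sum of the per-CPU slots */
	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}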
-#define def_write_per_cpu(cntr) \
-static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
-{ \
- struct qib_devdata *dd = ppd->dd; \
- struct qib_ibport *qibp = &ppd->ibport_data; \
- /* A write can only zero the counter */ \
- if (data == 0) \
- qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
- else \
- qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
+static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf,
+ size_t count, u64 *zero, u64 cur)
+{
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val != 0) {
+ qib_dev_err(dd, "Per CPU cntrs can only be zeroed");
+ return count;
+ }
+ *zero = cur;
+ return count;
}
-def_write_per_cpu(rc_acks)
-def_write_per_cpu(rc_qacks)
-def_write_per_cpu(rc_delayed_comp)
+static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
- qibp->rvp.z_##cntr)
+ return sysfs_emit(buf, "%llu\n",
+ get_all_cpu_total(qibp->rvp.rc_acks) -
+ qibp->rvp.z_rc_acks);
+}
-static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count)
{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, diagc_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
- u64 val;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+
+ return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks,
+ get_all_cpu_total(qibp->rvp.rc_acks));
+}
+static IB_PORT_ATTR_RW(rc_acks);
- if (!strncmp(dattr->attr.name, "rc_acks", 7))
- val = READ_PER_CPU_CNTR(rc_acks);
- else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
- val = READ_PER_CPU_CNTR(rc_qacks);
- else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
- val = READ_PER_CPU_CNTR(rc_delayed_comp);
- else
- val = *(u32 *)((char *)qibp + dattr->counter);
+static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
- return sysfs_emit(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n",
+ get_all_cpu_total(qibp->rvp.rc_qacks) -
+ qibp->rvp.z_rc_qacks);
}
-static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t size)
+static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, const char *buf,
+ size_t count)
{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_pportdata *ppd =
- container_of(kobj, struct qib_pportdata, diagc_kobj);
- struct qib_ibport *qibp = &ppd->ibport_data;
- u32 val;
- int ret;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
+ return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks,
+ get_all_cpu_total(qibp->rvp.rc_qacks));
+}
+static IB_PORT_ATTR_RW(rc_qacks);
- if (!strncmp(dattr->attr.name, "rc_acks", 7))
- write_per_cpu_rc_acks(ppd, val);
- else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
- write_per_cpu_rc_qacks(ppd, val);
- else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
- write_per_cpu_rc_delayed_comp(ppd, val);
- else
- *(u32 *)((char *)qibp + dattr->counter) = val;
- return size;
+static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr, char *buf)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+
+ return sysfs_emit(buf, "%llu\n",
+ get_all_cpu_total(qibp->rvp.rc_delayed_comp) -
+ qibp->rvp.z_rc_delayed_comp);
}
-static const struct sysfs_ops qib_diagc_ops = {
- .show = diagc_attr_show,
- .store = diagc_attr_store,
+static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
+
+ return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp,
+ get_all_cpu_total(qibp->rvp.rc_delayed_comp));
+}
+static IB_PORT_ATTR_RW(rc_delayed_comp);
+
+static struct attribute *port_diagc_attributes[] = {
+ &qib_diagc_attr_rc_resends.attr.attr,
+ &qib_diagc_attr_seq_naks.attr.attr,
+ &qib_diagc_attr_rdma_seq.attr.attr,
+ &qib_diagc_attr_rnr_naks.attr.attr,
+ &qib_diagc_attr_other_naks.attr.attr,
+ &qib_diagc_attr_rc_timeouts.attr.attr,
+ &qib_diagc_attr_loop_pkts.attr.attr,
+ &qib_diagc_attr_pkt_drops.attr.attr,
+ &qib_diagc_attr_dmawait.attr.attr,
+ &qib_diagc_attr_unaligned.attr.attr,
+ &qib_diagc_attr_rc_dupreq.attr.attr,
+ &qib_diagc_attr_rc_seqnak.attr.attr,
+ &qib_diagc_attr_rc_crwaits.attr.attr,
+ &ib_port_attr_rc_acks.attr,
+ &ib_port_attr_rc_qacks.attr,
+ &ib_port_attr_rc_delayed_comp.attr,
+ NULL
};
-static struct kobj_type qib_diagc_ktype = {
- .release = qib_port_release,
- .sysfs_ops = &qib_diagc_ops,
- .default_attrs = diagc_default_attributes
+static const struct attribute_group port_diagc_group = {
+ .name = "linkcontrol",
+ .attrs = port_diagc_attributes,
};
/* End diag_counters */
+const struct attribute_group *qib_attr_port_groups[] = {
+ &port_linkcontrol_group,
+ &port_ccmgta_attribute_group,
+ &port_sl2vl_group,
+ &port_diagc_group,
+ NULL,
+};
+
/* end of per-port file structures and support code */
/*
@@ -727,125 +733,3 @@ static struct attribute *qib_attributes[] = {
const struct attribute_group qib_attr_group = {
.attrs = qib_attributes,
};
-
-int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
- struct kobject *kobj)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (!port_num || port_num > dd->num_pports) {
- qib_dev_err(dd,
- "Skipping infiniband class with invalid port %u\n",
- port_num);
- ret = -ENODEV;
- goto bail;
- }
- ppd = &dd->pport[port_num - 1];
-
- ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
- "linkcontrol");
- if (ret) {
- qib_dev_err(dd,
- "Skipping linkcontrol sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_link;
- }
- kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
- "sl2vl");
- if (ret) {
- qib_dev_err(dd,
- "Skipping sl2vl sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_sl;
- }
- kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
- "diag_counters");
- if (ret) {
- qib_dev_err(dd,
- "Skipping diag_counters sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_diagc;
- }
- kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return 0;
-
- ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
- kobj, "CCMgtA");
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- if (ret) {
- qib_dev_err(dd,
- "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc_entry_bin;
- }
-
- qib_devinfo(dd->pcidev,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
-
- return 0;
-
-bail_cc_entry_bin:
- sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
-bail_cc:
- kobject_put(&ppd->pport_cc_kobj);
-bail_diagc:
- kobject_put(&ppd->diagc_kobj);
-bail_sl:
- kobject_put(&ppd->sl2vl_kobj);
-bail_link:
- kobject_put(&ppd->pport_kobj);
-bail:
- return ret;
-}
-
-/*
- * Unregister and remove our files in /sys/class/infiniband.
- */
-void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int i;
-
- for (i = 0; i < dd->num_pports; i++) {
- ppd = &dd->pport[i];
- if (qib_cc_table_size &&
- ppd->congestion_entries_shadow) {
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- kobject_put(&ppd->pport_cc_kobj);
- }
- kobject_put(&ppd->diagc_kobj);
- kobject_put(&ppd->sl2vl_kobj);
- kobject_put(&ppd->pport_kobj);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index d17d034ecdfd..ef91bff5c23c 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1483,7 +1483,8 @@ static const struct ib_device_ops qib_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_QIB,
- .init_port = qib_create_port_files,
+ .port_groups = qib_attr_port_groups,
+ .device_group = &qib_attr_group,
.modify_device = qib_modify_device,
.process_mad = qib_process_mad,
};
@@ -1612,7 +1613,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
i,
dd->rcd[ctxt]->pkeys);
}
- rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
ib_set_device_ops(ibdev, &qib_dev_ops);
ret = rvt_register_device(&dd->verbs_dev.rdi);
@@ -1644,8 +1644,6 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
- qib_verbs_unregister_sysfs(dd);
-
rvt_unregister_device(&dd->verbs_dev.rdi);
if (!list_empty(&dev->piowait))
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index ff6a40e259d5..c49f9e19d926 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -347,6 +347,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.dereg_mr = usnic_ib_dereg_mr,
.destroy_cq = usnic_ib_destroy_cq,
.destroy_qp = usnic_ib_destroy_qp,
+ .device_group = &usnic_attr_group,
.get_dev_fw_str = usnic_get_dev_fw_str,
.get_link_layer = usnic_ib_port_link_layer,
.get_port_immutable = usnic_port_immutable,
@@ -400,8 +401,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
- rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
-
ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
if (ret)
goto err_fwd_dealloc;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6bf2d2e47d07..8ed8bc24c69f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -162,6 +162,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.destroy_ah = pvrdma_destroy_ah,
.destroy_cq = pvrdma_destroy_cq,
.destroy_qp = pvrdma_destroy_qp,
+ .device_group = &pvrdma_attr_group,
.get_dev_fw_str = pvrdma_get_fw_ver_str,
.get_dma_mr = pvrdma_get_dma_mr,
.get_link_layer = pvrdma_port_link_layer,
@@ -240,7 +241,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
if (ret)
goto err_srq_free;
spin_lock_init(&dev->srq_tbl_lock);
- rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
if (ret)
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 601d18dda1f5..34b7af6ab9c2 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -101,8 +101,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
}
/**
- *rvt_mr_exit: clean up MR
- *@rdi: rvt dev structure
+ * rvt_mr_exit - clean up MR
+ * @rdi: rvt dev structure
*
* called when drivers have unregistered or perhaps failed to register with us
*/
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9d13db68283c..e9f3d356b361 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -984,7 +984,8 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
spin_unlock_irq(&qp->r_lock);
}
-/** rvt_free_qpn - Free a qpn from the bit map
+/**
+ * rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table
* @qpn: queue pair number to free
*/
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 12ebe041a5da..ac17209816cd 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -144,7 +144,7 @@ static int rvt_modify_device(struct ib_device *device,
}
/**
- * rvt_query_port: Passes the query port call to the driver
+ * rvt_query_port - Passes the query port call to the driver
* @ibdev: Verbs IB dev
* @port_num: port number, 1 based from ib core
* @props: structure to hold returned properties
@@ -175,7 +175,7 @@ static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
}
/**
- * rvt_modify_port
+ * rvt_modify_port - modify port
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
* @port_modify_mask: How to change the port
@@ -418,7 +418,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* These functions are not part of verbs specifically but are
* required for rdmavt to function.
*/
- if ((!rdi->ibdev.ops.init_port) ||
+ if ((!rdi->ibdev.ops.port_groups) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 66af72dca759..1e24673e9318 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -15,6 +15,7 @@ rdma_rxe-y := \
rxe_qp.o \
rxe_cq.o \
rxe_mr.o \
+ rxe_mw.o \
rxe_opcode.o \
rxe_mmap.o \
rxe_icrc.o \
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 95f0de0c8b49..8e0f9c489cab 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -54,6 +54,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_cq = RXE_MAX_CQ;
rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
rxe->attr.max_mr = RXE_MAX_MR;
+ rxe->attr.max_mw = RXE_MAX_MW;
rxe->attr.max_pd = RXE_MAX_PD;
rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index a6712e373eed..58ad9c2644f3 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -103,6 +103,7 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
case IB_WR_REG_MR: return IB_WC_REG_MR;
+ case IB_WR_BIND_MW: return IB_WC_BIND_MW;
default:
return 0xff;
@@ -141,7 +142,10 @@ static inline enum comp_state get_wqe(struct rxe_qp *qp,
/* we come here whether or not we found a response packet to see if
* there are any posted WQEs
*/
- wqe = queue_head(qp->sq.queue);
+ if (qp->is_user)
+ wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER);
+ else
+ wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL);
*wqe_p = wqe;
/* no WQE or requester has not started it yet */
@@ -345,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
- payload_size(pkt), to_mr_obj, NULL);
+ payload_size(pkt), RXE_TO_MR_OBJ, NULL);
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
@@ -367,7 +371,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
- sizeof(u64), to_mr_obj, NULL);
+ sizeof(u64), RXE_TO_MR_OBJ, NULL);
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
@@ -418,16 +422,23 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_cqe cqe;
+ bool post;
+
+ /* do we need to post a completion */
+ post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ wqe->status != IB_WC_SUCCESS);
- if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- wqe->status != IB_WC_SUCCESS) {
+ if (post)
make_send_cqe(qp, wqe, &cqe);
- advance_consumer(qp->sq.queue);
+
+ if (qp->is_user)
+ advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER);
+ else
+ advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL);
+
+ if (post)
rxe_cq_post(qp->scq, &cqe, 0);
- } else {
- advance_consumer(qp->sq.queue);
- }
if (wqe->wr.opcode == IB_WR_SEND ||
wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
@@ -515,6 +526,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
struct rxe_send_wqe *wqe;
+ struct rxe_queue *q = qp->sq.queue;
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
@@ -522,12 +534,12 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
ib_device_put(qp->ibqp.device);
}
- while ((wqe = queue_head(qp->sq.queue))) {
+ while ((wqe = queue_head(q, q->type))) {
if (notify) {
wqe->status = IB_WC_WR_FLUSH_ERR;
do_complete(qp, wqe);
} else {
- advance_consumer(qp->sq.queue);
+ advance_consumer(q, q->type);
}
}
}
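queue_head() and advance_consumer() gain an explicit queue_type argument throughout this file because queues mapped into user space and kernel-only queues now handle their producer/consumer indices differently. The recurring is_user branch reduces to a small dispatch (a sketch of the idiom; the QUEUE_TYPE_* enumerators come from rxe_queue.h):

static struct rxe_send_wqe *sq_head(struct rxe_qp *qp)
{
	enum queue_type type = qp->is_user ? QUEUE_TYPE_FROM_USER
					   : QUEUE_TYPE_KERNEL;

	return queue_head(qp->sq.queue, type);
}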
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index b315ebf041ac..aef288f164fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -25,7 +25,11 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cq) {
- count = queue_count(cq->queue);
+ if (cq->is_user)
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
+
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
@@ -59,9 +63,11 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
struct rxe_create_cq_resp __user *uresp)
{
int err;
+ enum queue_type type;
+ type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
cq->queue = rxe_queue_init(rxe, &cqe,
- sizeof(struct rxe_cqe));
+ sizeof(struct rxe_cqe), type);
if (!cq->queue) {
pr_warn("unable to create cq\n");
return -ENOMEM;
@@ -106,10 +112,17 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
unsigned long flags;
+ int full;
+ void *addr;
spin_lock_irqsave(&cq->cq_lock, flags);
- if (unlikely(queue_full(cq->queue))) {
+ if (cq->is_user)
+ full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
+
+ if (unlikely(full)) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
@@ -121,9 +134,18 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return -EBUSY;
}
- memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
+ if (cq->is_user)
+ addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
+
+ memcpy(addr, cqe, sizeof(*cqe));
+
+ if (cq->is_user)
+ advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
- advance_producer(cq->queue);
spin_unlock_irqrestore(&cq->cq_lock, flags);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index f469fd1c753d..d5ceb706d964 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -40,13 +40,10 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
return ARRAY_SIZE(rxe_counter_name);
}
-struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num)
+struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
- /* We support only per port stats */
- if (!port_num)
- return NULL;
return rdma_alloc_hw_stats_struct(rxe_counter_name,
ARRAY_SIZE(rxe_counter_name),
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 2f369acb46d7..71f4d4fa9dc8 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -29,8 +29,8 @@ enum rxe_counters {
RXE_NUM_OF_COUNTERS
};
-struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u32 port_num);
+struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index ef8061d2fbe0..1ddb20855dee 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -71,40 +71,32 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
-enum copy_direction {
- to_mr_obj,
- from_mr_obj,
-};
-
+u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
- int access, struct ib_udata *udata, struct rxe_mr *mr);
-
+ int access, struct rxe_mr *mr);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
-
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum copy_direction dir, u32 *crcp);
-
+ enum rxe_mr_copy_dir dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
- enum copy_direction dir, u32 *crcp);
-
+ enum rxe_mr_copy_dir dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
-
-enum lookup_type {
- lookup_local,
- lookup_remote,
-};
-
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type);
-
+ enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
-
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);
-int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+/* rxe_mw.c */
+int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int rxe_dealloc_mw(struct ib_mw *ibmw);
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
+int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
+void rxe_mw_cleanup(struct rxe_pool_entry *arg);
/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 9f63947bab12..6aabcb4de235 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -7,19 +7,17 @@
#include "rxe.h"
#include "rxe_loc.h"
-/*
- * lfsr (linear feedback shift register) with period 255
+/* Return a random 8-bit key value that is
+ * different from the last_key. Set last_key to -1
+ * if this is the first key for an MR or MW
*/
-static u8 rxe_get_key(void)
+u8 rxe_get_next_key(u32 last_key)
{
- static u32 key = 1;
-
- key = key << 1;
+ u8 key;
- key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
- ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
-
- key &= 0xff;
+ do {
+ get_random_bytes(&key, 1);
+ } while (key == last_key);
return key;
}
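The 8-bit key returned here is only the consumer-visible slice of an lkey/rkey; the pool index supplies the upper bits (last_key is widened to u32 so a caller can pass -1 to mean "no previous key", a value no u8 can collide with). Composition, with an illustrative index:

u32 index = 0x123;				/* pool index, illustrative */
u32 lkey  = (index << 8) | rxe_get_next_key(-1);	/* -1: first key */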
@@ -47,7 +45,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mr->pelem.index << 8 | rxe_get_key();
+ u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
mr->ibmr.lkey = lkey;
@@ -57,21 +55,6 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
-{
- struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
- int i;
-
- ib_umem_release(mr->umem);
-
- if (mr->map) {
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
-
- kfree(mr->map);
- }
-}
-
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
@@ -121,7 +104,7 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
}
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
- int access, struct ib_udata *udata, struct rxe_mr *mr)
+ int access, struct rxe_mr *mr)
{
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
@@ -135,7 +118,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
- err = -EINVAL;
+ err = PTR_ERR(umem);
goto err1;
}
@@ -300,7 +283,7 @@ out:
* crc32 if crcp is not zero. caller must hold a reference to mr
*/
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum copy_direction dir, u32 *crcp)
+ enum rxe_mr_copy_dir dir, u32 *crcp)
{
int err;
int bytes;
@@ -318,9 +301,9 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
if (mr->type == RXE_MR_TYPE_DMA) {
u8 *src, *dest;
- src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
+ src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
- dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
+ dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;
memcpy(dest, src, length);
@@ -348,8 +331,8 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
u8 *src, *dest;
va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (dir == to_mr_obj) ? addr : va;
- dest = (dir == to_mr_obj) ? va : addr;
+ src = (dir == RXE_TO_MR_OBJ) ? addr : va;
+ dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
bytes = buf->size - offset;
@@ -394,7 +377,7 @@ int copy_data(
struct rxe_dma_info *dma,
void *addr,
int length,
- enum copy_direction dir,
+ enum rxe_mr_copy_dir dir,
u32 *crcp)
{
int bytes;
@@ -414,7 +397,7 @@ int copy_data(
}
if (sge->length && (offset < sge->length)) {
- mr = lookup_mr(pd, access, sge->lkey, lookup_local);
+ mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
if (!mr) {
err = -EINVAL;
goto err1;
@@ -440,7 +423,7 @@ int copy_data(
if (sge->length) {
mr = lookup_mr(pd, access, sge->lkey,
- lookup_local);
+ RXE_LOOKUP_LOCAL);
if (!mr) {
err = -EINVAL;
goto err1;
@@ -522,7 +505,7 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
* (4) verify that mr state is valid
*/
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type)
+ enum rxe_mr_lookup_type type)
{
struct rxe_mr *mr;
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
@@ -532,8 +515,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
if (!mr)
return NULL;
- if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
- (type == lookup_remote && mr_rkey(mr) != key) ||
+ if (unlikely((type == RXE_LOOKUP_LOCAL && mr_lkey(mr) != key) ||
+ (type == RXE_LOOKUP_REMOTE && mr_rkey(mr) != key) ||
mr_pd(mr) != pd || (access && !(access & mr->access)) ||
mr->state != RXE_MR_STATE_VALID)) {
rxe_drop_ref(mr);
@@ -542,3 +525,72 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
return mr;
}
+
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mr *mr;
+ int ret;
+
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr) {
+ pr_err("%s: No MR for rkey %#x\n", __func__, rkey);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (rkey != mr->ibmr.rkey) {
+ pr_err("%s: rkey (%#x) doesn't match mr->ibmr.rkey (%#x)\n",
+ __func__, rkey, mr->ibmr.rkey);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ if (atomic_read(&mr->num_mw) > 0) {
+ pr_warn("%s: Attempt to invalidate an MR while bound to MWs\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ mr->state = RXE_MR_STATE_FREE;
+ ret = 0;
+
+err_drop_ref:
+ rxe_drop_ref(mr);
+err:
+ return ret;
+}
+
+int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+
+ if (atomic_read(&mr->num_mw) > 0) {
+ pr_warn("%s: Attempt to deregister an MR while bound to MWs\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ mr->state = RXE_MR_STATE_ZOMBIE;
+ rxe_drop_ref(mr_pd(mr));
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+
+ return 0;
+}
+
+void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+{
+ struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ int i;
+
+ ib_umem_release(mr->umem);
+
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
+ }
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
new file mode 100644
index 000000000000..5ba77df7598e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
+ */
+
+#include "rxe.h"
+
+int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct rxe_mw *mw = to_rmw(ibmw);
+ struct rxe_pd *pd = to_rpd(ibmw->pd);
+ struct rxe_dev *rxe = to_rdev(ibmw->device);
+ int ret;
+
+ rxe_add_ref(pd);
+
+ ret = rxe_add_to_pool(&rxe->mw_pool, mw);
+ if (ret) {
+ rxe_drop_ref(pd);
+ return ret;
+ }
+
+ rxe_add_index(mw);
+ ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
+ RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
+ spin_lock_init(&mw->lock);
+
+ return 0;
+}
+
+static void rxe_do_dealloc_mw(struct rxe_mw *mw)
+{
+ if (mw->mr) {
+ struct rxe_mr *mr = mw->mr;
+
+ mw->mr = NULL;
+ atomic_dec(&mr->num_mw);
+ rxe_drop_ref(mr);
+ }
+
+ if (mw->qp) {
+ struct rxe_qp *qp = mw->qp;
+
+ mw->qp = NULL;
+ rxe_drop_ref(qp);
+ }
+
+ mw->access = 0;
+ mw->addr = 0;
+ mw->length = 0;
+ mw->state = RXE_MW_STATE_INVALID;
+}
+
+int rxe_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct rxe_mw *mw = to_rmw(ibmw);
+ struct rxe_pd *pd = to_rpd(ibmw->pd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mw->lock, flags);
+ rxe_do_dealloc_mw(mw);
+ spin_unlock_irqrestore(&mw->lock, flags);
+
+ rxe_drop_ref(mw);
+ rxe_drop_ref(pd);
+
+ return 0;
+}
+
+static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_mw *mw, struct rxe_mr *mr)
+{
+ if (mw->ibmw.type == IB_MW_TYPE_1) {
+ if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
+ pr_err_once(
+ "attempt to bind a type 1 MW not in the valid state\n");
+ return -EINVAL;
+ }
+
+ /* o10-36.2.2 */
+ if (unlikely((mw->access & IB_ZERO_BASED))) {
+ pr_err_once("attempt to bind a zero based type 1 MW\n");
+ return -EINVAL;
+ }
+ }
+
+ if (mw->ibmw.type == IB_MW_TYPE_2) {
+ /* o10-37.2.30 */
+ if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
+ pr_err_once(
+ "attempt to bind a type 2 MW not in the free state\n");
+ return -EINVAL;
+ }
+
+ /* C10-72 */
+ if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
+ pr_err_once(
+ "attempt to bind type 2 MW with qp with different PD\n");
+ return -EINVAL;
+ }
+
+ /* o10-37.2.40 */
+ if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
+ pr_err_once(
+ "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
+ return -EINVAL;
+ }
+ }
+
+ if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
+ pr_err_once("attempt to bind MW with same key\n");
+ return -EINVAL;
+ }
+
+ /* remaining checks only apply to a nonzero MR */
+ if (!mr)
+ return 0;
+
+ if (unlikely(mr->access & IB_ZERO_BASED)) {
+ pr_err_once("attempt to bind MW to zero based MR\n");
+ return -EINVAL;
+ }
+
+ /* C10-73 */
+ if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
+ pr_err_once(
+ "attempt to bind an MW to an MR without bind access\n");
+ return -EINVAL;
+ }
+
+ /* C10-74 */
+ if (unlikely((mw->access &
+ (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
+ !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
+ pr_err_once(
+ "attempt to bind an writeable MW to an MR without local write access\n");
+ return -EINVAL;
+ }
+
+ /* C10-75 */
+ if (mw->access & IB_ZERO_BASED) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+ pr_err_once(
+ "attempt to bind a ZB MW outside of the MR\n");
+ return -EINVAL;
+ }
+ } else {
+ if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+ ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
+ (mr->iova + mr->length)))) {
+ pr_err_once(
+ "attempt to bind a VA MW outside of the MR\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
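The final pair of checks (C10-75) require the window to lie entirely inside the parent MR: zero-based windows are bounded by the MR's length alone, while VA windows must satisfy both an address floor and an end-address ceiling. A worked VA example (all numbers hypothetical):

u64 mr_iova = 0x1000, mr_length = 0x2000;	/* MR covers [0x1000, 0x3000) */
u64 mw_addr = 0x1800, mw_length = 0x1000;	/* MW covers [0x1800, 0x2800) */

bool ok = mw_addr >= mr_iova &&
	  mw_addr + mw_length <= mr_iova + mr_length;	/* true: inside */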
+
+static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_mw *mw, struct rxe_mr *mr)
+{
+ u32 rkey;
+ u32 new_rkey;
+
+ rkey = mw->ibmw.rkey;
+ new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);
+
+ mw->ibmw.rkey = new_rkey;
+ mw->access = wqe->wr.wr.mw.access;
+ mw->state = RXE_MW_STATE_VALID;
+ mw->addr = wqe->wr.wr.mw.addr;
+ mw->length = wqe->wr.wr.mw.length;
+
+ if (mw->mr) {
+ rxe_drop_ref(mw->mr);
+ atomic_dec(&mw->mr->num_mw);
+ mw->mr = NULL;
+ }
+
+ if (mw->length) {
+ mw->mr = mr;
+ atomic_inc(&mr->num_mw);
+ rxe_add_ref(mr);
+ }
+
+ if (mw->ibmw.type == IB_MW_TYPE_2) {
+ rxe_add_ref(qp);
+ mw->qp = qp;
+ }
+}
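Note that rxe_do_bind_mw() rewrites only the low byte of the rkey: the upper 24 bits encode the pool index and must stay stable so rxe_pool_get_index(rkey >> 8) keeps resolving to the same MW, while the consumer supplies a fresh key byte on each bind (which is why rxe_check_bind_mw() rejects a key equal to the current one). Numerically (illustrative values):

u32 rkey     = 0x00012307;	/* index 0x123, old key 0x07 */
u32 wqe_rkey = 0x000000a5;	/* consumer-chosen key byte */
u32 new_rkey = (rkey & 0xffffff00) | (wqe_rkey & 0x000000ff);
/* new_rkey == 0x000123a5: same index, new key byte */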
+
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ int ret;
+ struct rxe_mw *mw;
+ struct rxe_mr *mr;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool,
+ wqe->wr.wr.mw.mw_rkey >> 8);
+ if (unlikely(!mw)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
+ ret = -EINVAL;
+ goto err_drop_mw;
+ }
+
+ if (likely(wqe->wr.wr.mw.length)) {
+ mr = rxe_pool_get_index(&rxe->mr_pool,
+ wqe->wr.wr.mw.mr_lkey >> 8);
+ if (unlikely(!mr)) {
+ ret = -EINVAL;
+ goto err_drop_mw;
+ }
+
+ if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
+ ret = -EINVAL;
+ goto err_drop_mr;
+ }
+ } else {
+ mr = NULL;
+ }
+
+ spin_lock_irqsave(&mw->lock, flags);
+
+ ret = rxe_check_bind_mw(qp, wqe, mw, mr);
+ if (ret)
+ goto err_unlock;
+
+ rxe_do_bind_mw(qp, wqe, mw, mr);
+err_unlock:
+ spin_unlock_irqrestore(&mw->lock, flags);
+err_drop_mr:
+ if (mr)
+ rxe_drop_ref(mr);
+err_drop_mw:
+ rxe_drop_ref(mw);
+err:
+ return ret;
+}
+
+static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
+{
+ if (unlikely(mw->state == RXE_MW_STATE_INVALID))
+ return -EINVAL;
+
+ /* o10-37.2.26 */
+ if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rxe_do_invalidate_mw(struct rxe_mw *mw)
+{
+ struct rxe_qp *qp;
+ struct rxe_mr *mr;
+
+ /* valid type 2 MW will always have a QP pointer */
+ qp = mw->qp;
+ mw->qp = NULL;
+ rxe_drop_ref(qp);
+
+ /* valid type 2 MW will always have an MR pointer */
+ mr = mw->mr;
+ mw->mr = NULL;
+ atomic_dec(&mr->num_mw);
+ rxe_drop_ref(mr);
+
+ mw->access = 0;
+ mw->addr = 0;
+ mw->length = 0;
+ mw->state = RXE_MW_STATE_FREE;
+}
+
+int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
+ struct rxe_mw *mw;
+ int ret;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+ if (!mw) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (rkey != mw->ibmw.rkey) {
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ spin_lock_irqsave(&mw->lock, flags);
+
+ ret = rxe_check_invalidate_mw(qp, mw);
+ if (ret)
+ goto err_unlock;
+
+ rxe_do_invalidate_mw(mw);
+err_unlock:
+ spin_unlock_irqrestore(&mw->lock, flags);
+err_drop_ref:
+ rxe_drop_ref(mw);
+err:
+ return ret;
+}
+
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
+ struct rxe_mw *mw;
+ int index = rkey >> 8;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, index);
+ if (!mw)
+ return NULL;
+
+ if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd ||
+ (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
+ (mw->length == 0) ||
+ (access && !(access & mw->access)) ||
+ mw->state != RXE_MW_STATE_VALID)) {
+ rxe_drop_ref(mw);
+ return NULL;
+ }
+
+ return mw;
+}
+
+void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+{
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+
+ rxe_drop_index(mw);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 01662727dca0..dec92928a1cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -207,10 +207,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
/* Create UDP socket */
err = udp_sock_create(net, &udp_cfg, &sock);
- if (err < 0) {
- pr_err("failed to create udp socket. err = %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
@@ -269,8 +267,6 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
iph->ttl = ttl;
__ip_select_ident(dev_net(dst->dev), iph,
skb_shinfo(skb)->gso_segs ?: 1);
- iph->tot_len = htons(skb->len);
- ip_send_check(iph);
}
static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
@@ -472,7 +468,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->rxe = rxe;
pkt->port_num = port_num;
- pkt->hdr = skb_put_zero(skb, paylen);
+ pkt->hdr = skb_put(skb, paylen);
pkt->mask |= RXE_GRH_MASK;
out:
@@ -619,6 +615,12 @@ static int rxe_net_ipv6_init(void)
recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), true);
+ if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
+ recv_sockets.sk6 = NULL;
+ pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
+ return 0;
+ }
+
if (IS_ERR(recv_sockets.sk6)) {
recv_sockets.sk6 = NULL;
pr_err("Failed to create IPv6 UDP tunnel\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 0cb4b01fd910..3ef5a10a6efd 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -87,13 +87,20 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
[IB_WR_LOCAL_INV] = {
.name = "IB_WR_LOCAL_INV",
.mask = {
- [IB_QPT_RC] = WR_REG_MASK,
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
},
},
[IB_WR_REG_MR] = {
.name = "IB_WR_REG_MR",
.mask = {
- [IB_QPT_RC] = WR_REG_MASK,
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
+ },
+ },
+ [IB_WR_BIND_MW] = {
+ .name = "IB_WR_BIND_MW",
+ .mask = {
+ [IB_QPT_RC] = WR_LOCAL_OP_MASK,
+ [IB_QPT_UC] = WR_LOCAL_OP_MASK,
},
},
};
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index 1041ac9a9233..e02f039b8c44 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -19,8 +19,7 @@ enum rxe_wr_mask {
WR_SEND_MASK = BIT(2),
WR_READ_MASK = BIT(3),
WR_WRITE_MASK = BIT(4),
- WR_LOCAL_MASK = BIT(5),
- WR_REG_MASK = BIT(6),
+ WR_LOCAL_OP_MASK = BIT(5),
WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 25ab50d9b7c2..742e6ec93686 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -37,7 +37,6 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
enum rxe_device_param {
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
| IB_DEVICE_BAD_QKEY_CNTR
@@ -49,7 +48,10 @@ enum rxe_device_param {
| IB_DEVICE_RC_RNR_NAK_GEN
| IB_DEVICE_SRQ_RESIZE
| IB_DEVICE_MEM_MGT_EXTENSIONS
- | IB_DEVICE_ALLOW_USER_UNREG,
+ | IB_DEVICE_ALLOW_USER_UNREG
+ | IB_DEVICE_MEM_WINDOW
+ | IB_DEVICE_MEM_WINDOW_TYPE_2A
+ | IB_DEVICE_MEM_WINDOW_TYPE_2B,
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
@@ -58,7 +60,6 @@ enum rxe_device_param {
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
RXE_MAX_LOG_CQE = 15,
- RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
RXE_MAX_RES_RD_ATOM = 0x3f000,
@@ -67,7 +68,6 @@ enum rxe_device_param {
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
RXE_MAX_AH = 100,
- RXE_MAX_SRQ = 960,
RXE_MAX_SRQ_WR = 0x4000,
RXE_MIN_SRQ_WR = 1,
RXE_MAX_SRQ_SGE = 27,
@@ -80,16 +80,21 @@ enum rxe_device_param {
RXE_NUM_PORT = 1,
+ RXE_MAX_QP = 0x10000,
RXE_MIN_QP_INDEX = 16,
RXE_MAX_QP_INDEX = 0x00020000,
+ RXE_MAX_SRQ = 0x00001000,
RXE_MIN_SRQ_INDEX = 0x00020001,
RXE_MAX_SRQ_INDEX = 0x00040000,
+ RXE_MAX_MR = 0x00001000,
+ RXE_MAX_MW = 0x00001000,
RXE_MIN_MR_INDEX = 0x00000001,
- RXE_MAX_MR_INDEX = 0x00040000,
- RXE_MIN_MW_INDEX = 0x00040001,
- RXE_MAX_MW_INDEX = 0x00060000,
+ RXE_MAX_MR_INDEX = 0x00010000,
+ RXE_MIN_MW_INDEX = 0x00010001,
+ RXE_MAX_MW_INDEX = 0x00020000,
+
RXE_MAX_PKT_PER_ACK = 64,
RXE_MAX_UNACKED_PSNS = 128,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index d24901f2af3f..0b8e7c6255a2 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -65,6 +65,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mw),
.elem_offset = offsetof(struct rxe_mw, pelem),
+ .cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
@@ -183,7 +184,7 @@ static u32 alloc_index(struct rxe_pool *pool)
return index + pool->index.min_index;
}
-static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
@@ -195,7 +196,7 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
if (elem->index == new->index) {
pr_warn("element already exists!\n");
- goto out;
+ return -EINVAL;
}
if (elem->index > new->index)
@@ -206,11 +207,11 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
rb_link_node(&new->index_node, parent, link);
rb_insert_color(&new->index_node, &pool->index.tree);
-out:
- return;
+
+ return 0;
}
-static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
@@ -226,7 +227,7 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
if (cmp == 0) {
pr_warn("key already exists!\n");
- goto out;
+ return -EINVAL;
}
if (cmp > 0)
@@ -237,26 +238,32 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
rb_link_node(&new->key_node, parent, link);
rb_insert_color(&new->key_node, &pool->key.tree);
-out:
- return;
+
+ return 0;
}
-void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
+ int err;
memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
- insert_key(pool, elem);
+ err = rxe_insert_key(pool, elem);
+
+ return err;
}
-void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
+ int err;
write_lock_irqsave(&pool->pool_lock, flags);
- __rxe_add_key_locked(elem, key);
+ err = __rxe_add_key_locked(elem, key);
write_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return err;
}
void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
@@ -276,22 +283,28 @@ void __rxe_drop_key(struct rxe_pool_entry *elem)
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
+ int err;
elem->index = alloc_index(pool);
- insert_index(pool, elem);
+ err = rxe_insert_index(pool, elem);
+
+ return err;
}
-void __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
+ int err;
write_lock_irqsave(&pool->pool_lock, flags);
- __rxe_add_index_locked(elem);
+ err = __rxe_add_index_locked(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return err;
}
void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 61210b300a78..1feca1bffced 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -111,11 +111,11 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
/* assign an index to an indexed object and insert the object into
 * the pool's rb tree, with and without the caller holding the pool_lock
*/
-void __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_entry *elem);
#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
-void __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_entry *elem);
#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
@@ -133,11 +133,11 @@ void __rxe_drop_index(struct rxe_pool_entry *elem);
/* assign a key to a keyed object and insert the object into
 * the pool's rb tree, with and without the caller holding the pool_lock
*/
-void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
-void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
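
A sketch of what callers gain from the void-to-int conversion above: a duplicate index, previously only a pr_warn() deep inside the rb-tree insert, now surfaces as -EINVAL. The wrapper below is a made-up example, not code from the patch:

/* hypothetical caller, assuming the rxe_pool.h declarations above */
static int example_assign_index(struct rxe_qp *qp)
{
	int err;

	err = rxe_add_index(qp);	/* may now return -EINVAL */
	if (err)
		pr_warn("qp index insert failed: %d\n", err);

	return err;
}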
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b0f350d674fd..1ab6af7ddb25 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -136,7 +136,6 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
if (res->type == RXE_ATOMIC_MASK) {
- rxe_drop_ref(qp);
kfree_skb(res->atomic.skb);
} else if (res->type == RXE_READ_MASK) {
if (res->read.mr)
@@ -206,6 +205,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
{
int err;
int wqe_size;
+ enum queue_type type;
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
@@ -231,7 +231,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
wqe_size += sizeof(struct rxe_send_wqe);
- qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
+ type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
+ wqe_size, type);
if (!qp->sq.queue)
return -ENOMEM;
@@ -246,7 +248,13 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- qp->req.wqe_index = producer_index(qp->sq.queue);
+ if (qp->is_user)
+ qp->req.wqe_index = producer_index(qp->sq.queue,
+ QUEUE_TYPE_FROM_USER);
+ else
+ qp->req.wqe_index = producer_index(qp->sq.queue,
+ QUEUE_TYPE_KERNEL);
+
qp->req.state = QP_STATE_RESET;
qp->req.opcode = -1;
qp->comp.opcode = -1;
@@ -274,6 +282,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
{
int err;
int wqe_size;
+ enum queue_type type;
if (!qp->srq) {
qp->rq.max_wr = init->cap.max_recv_wr;
@@ -284,9 +293,9 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
- qp->rq.queue = rxe_queue_init(rxe,
- &qp->rq.max_wr,
- wqe_size);
+ type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
+ wqe_size, type);
if (!qp->rq.queue)
return -ENOMEM;
@@ -304,6 +313,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->rq.producer_lock);
spin_lock_init(&qp->rq.consumer_lock);
+ qp->rq.is_user = qp->is_user;
+
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index fa69241b1187..85b812586ed4 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -52,9 +52,8 @@ inline void rxe_queue_reset(struct rxe_queue *q)
memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
- int *num_elem,
- unsigned int elem_size)
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+ unsigned int elem_size, enum queue_type type)
{
struct rxe_queue *q;
size_t buf_size;
@@ -69,6 +68,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
goto err1;
q->rxe = rxe;
+ q->type = type;
/* used in resize, only need to copy used part of queue */
q->elem_size = elem_size;
@@ -111,14 +111,15 @@ err1:
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem)
{
- if (!queue_empty(q) && (num_elem < queue_count(q)))
+ if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
return -EINVAL;
- while (!queue_empty(q)) {
- memcpy(producer_addr(new_q), consumer_addr(q),
- new_q->elem_size);
- advance_producer(new_q);
- advance_consumer(q);
+ while (!queue_empty(q, q->type)) {
+ memcpy(producer_addr(new_q, new_q->type),
+ consumer_addr(q, q->type),
+ new_q->elem_size);
+ advance_producer(new_q, new_q->type);
+ advance_consumer(q, q->type);
}
swap(*q, *new_q);
@@ -136,7 +137,7 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
int err;
unsigned long flags = 0, flags1;
- new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
+ new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 2902ca7b288c..2702b0e55fc3 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -17,8 +17,29 @@
* up to a power of 2. Since the queue is empty when the
* producer and consumer indices match, the maximum capacity
* of the queue is one less than the number of element slots
+ *
+ * Notes:
+ * - Kernel space indices are always masked with q->index_mask
+ *   before being stored, so they do not need to be checked
+ *   again when read.
+ * - User space indices may be out of range and must be
+ *   masked before use when read.
+ * - The kernel indices for shared queues must not be written
+ *   by user space, so the kernel works on a local copy and
+ *   stores it to the shared copy whenever the local copy changes.
+ * - By passing the type as a parameter separate from q, the
+ *   compiler can eliminate the switch statement when the actual
+ *   queue type is known at the call site. This is done in the
+ *   performance path; less critical paths simply pass q->type.
*/
+/* type of queue */
+enum queue_type {
+ QUEUE_TYPE_KERNEL,
+ QUEUE_TYPE_TO_USER,
+ QUEUE_TYPE_FROM_USER,
+};
+
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
@@ -27,6 +48,13 @@ struct rxe_queue {
size_t elem_size;
unsigned int log2_elem_size;
u32 index_mask;
+ enum queue_type type;
+ /* private copy of the kernel-owned index for queues shared
+ * between kernel space and user space. The kernel reads and
+ * writes this copy and then replicates it to rxe_queue_buf
+ * for read access by user space.
+ */
+ u32 index;
};
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
@@ -35,9 +63,8 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
void rxe_queue_reset(struct rxe_queue *q);
-struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
- int *num_elem,
- unsigned int elem_size);
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
+ unsigned int elem_size, enum queue_type type);
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
unsigned int elem_size, struct ib_udata *udata,
@@ -54,120 +81,235 @@ static inline int next_index(struct rxe_queue *q, int index)
return (index + 1) & q->buf->index_mask;
}
-static inline int queue_empty(struct rxe_queue *q)
+static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
u32 prod;
u32 cons;
- /* make sure all changes to queue complete before
- * testing queue empty
- */
- prod = smp_load_acquire(&q->buf->producer_index);
- /* same */
- cons = smp_load_acquire(&q->buf->consumer_index);
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ cons = q->buf->consumer_index;
+ break;
+ }
return ((prod - cons) & q->index_mask) == 0;
}
-static inline int queue_full(struct rxe_queue *q)
+static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
u32 prod;
u32 cons;
- /* make sure all changes to queue complete before
- * testing queue full
- */
- prod = smp_load_acquire(&q->buf->producer_index);
- /* same */
- cons = smp_load_acquire(&q->buf->consumer_index);
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ cons = q->buf->consumer_index;
+ break;
+ }
return ((prod + 1 - cons) & q->index_mask) == 0;
}
-static inline void advance_producer(struct rxe_queue *q)
+static inline unsigned int queue_count(const struct rxe_queue *q,
+ enum queue_type type)
{
u32 prod;
+ u32 cons;
- prod = (q->buf->producer_index + 1) & q->index_mask;
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ cons = q->buf->consumer_index;
+ break;
+ }
+
+ return (prod - cons) & q->index_mask;
+}
- /* make sure all changes to queue complete before
- * changing producer index
- */
- smp_store_release(&q->buf->producer_index, prod);
+static inline void advance_producer(struct rxe_queue *q, enum queue_type type)
+{
+ u32 prod;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ pr_warn_once("Normally kernel should not write user space index\n");
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ prod = (prod + 1) & q->index_mask;
+ /* same */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ q->index = (prod + 1) & q->index_mask;
+ q->buf->producer_index = q->index;
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ q->buf->producer_index = (prod + 1) & q->index_mask;
+ break;
+ }
}
-static inline void advance_consumer(struct rxe_queue *q)
+static inline void advance_consumer(struct rxe_queue *q, enum queue_type type)
{
u32 cons;
- cons = (q->buf->consumer_index + 1) & q->index_mask;
-
- /* make sure all changes to queue complete before
- * changing consumer index
- */
- smp_store_release(&q->buf->consumer_index, cons);
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ cons = q->index;
+ q->index = (cons + 1) & q->index_mask;
+ q->buf->consumer_index = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ pr_warn_once("Normally kernel should not write user space index\n");
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ cons = (cons + 1) & q->index_mask;
+ /* same */
+ smp_store_release(&q->buf->consumer_index, cons);
+ break;
+ case QUEUE_TYPE_KERNEL:
+ cons = q->buf->consumer_index;
+ q->buf->consumer_index = (cons + 1) & q->index_mask;
+ break;
+ }
}
-static inline void *producer_addr(struct rxe_queue *q)
+static inline void *producer_addr(struct rxe_queue *q, enum queue_type type)
{
- return q->buf->data + ((q->buf->producer_index & q->index_mask)
- << q->log2_elem_size);
+ u32 prod;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ prod &= q->index_mask;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ break;
+ }
+
+ return q->buf->data + (prod << q->log2_elem_size);
}
-static inline void *consumer_addr(struct rxe_queue *q)
+static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type)
{
- return q->buf->data + ((q->buf->consumer_index & q->index_mask)
- << q->log2_elem_size);
+ u32 cons;
+
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ cons &= q->index_mask;
+ break;
+ case QUEUE_TYPE_KERNEL:
+ cons = q->buf->consumer_index;
+ break;
+ }
+
+ return q->buf->data + (cons << q->log2_elem_size);
}
-static inline unsigned int producer_index(struct rxe_queue *q)
+static inline unsigned int producer_index(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 index;
-
- /* make sure all changes to queue
- * complete before getting producer index
- */
- index = smp_load_acquire(&q->buf->producer_index);
- index &= q->index_mask;
+ u32 prod;
- return index;
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ /* protect user space index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ prod &= q->index_mask;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ prod = q->index;
+ break;
+ case QUEUE_TYPE_KERNEL:
+ prod = q->buf->producer_index;
+ break;
+ }
+
+ return prod;
}
-static inline unsigned int consumer_index(struct rxe_queue *q)
+static inline unsigned int consumer_index(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 index;
-
- /* make sure all changes to queue
- * complete before getting consumer index
- */
- index = smp_load_acquire(&q->buf->consumer_index);
- index &= q->index_mask;
+ u32 cons;
- return index;
+ switch (type) {
+ case QUEUE_TYPE_FROM_USER:
+ cons = q->index;
+ break;
+ case QUEUE_TYPE_TO_USER:
+ /* protect user space index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ cons &= q->index_mask;
+ break;
+ case QUEUE_TYPE_KERNEL:
+ cons = q->buf->consumer_index;
+ break;
+ }
+
+ return cons;
}
-static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
+static inline void *addr_from_index(struct rxe_queue *q,
+ unsigned int index)
{
return q->buf->data + ((index & q->index_mask)
<< q->buf->log2_elem_size);
}
static inline unsigned int index_from_addr(const struct rxe_queue *q,
- const void *addr)
+ const void *addr)
{
return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
- & q->index_mask;
-}
-
-static inline unsigned int queue_count(const struct rxe_queue *q)
-{
- return (q->buf->producer_index - q->buf->consumer_index)
- & q->index_mask;
+ & q->index_mask;
}
-static inline void *queue_head(struct rxe_queue *q)
+static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
- return queue_empty(q) ? NULL : consumer_addr(q);
+ return queue_empty(q, type) ? NULL : consumer_addr(q, type);
}
#endif /* RXE_QUEUE_H */
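
To see why the queue type is passed as a separate argument instead of always reading q->type, consider a hedged sketch of a kernel-only wrapper: when the type is a compile-time constant, the compiler folds each switch above down to the single matching arm, so the fast path pays nothing for the new user/kernel distinction:

/* illustrative wrapper, not part of the patch */
static inline void *kernel_queue_head(struct rxe_queue *q)
{
	/* constant type: the switches in queue_empty() and
	 * consumer_addr() reduce to plain loads of the
	 * kernel-owned indices */
	return queue_head(q, QUEUE_TYPE_KERNEL);
}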
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 3664cdae7e1f..c57699cc6578 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -45,14 +45,24 @@ static void req_retry(struct rxe_qp *qp)
unsigned int mask;
int npsn;
int first = 1;
+ struct rxe_queue *q = qp->sq.queue;
+ unsigned int cons;
+ unsigned int prod;
- qp->req.wqe_index = consumer_index(qp->sq.queue);
+ if (qp->is_user) {
+ cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
+ prod = producer_index(q, QUEUE_TYPE_FROM_USER);
+ } else {
+ cons = consumer_index(q, QUEUE_TYPE_KERNEL);
+ prod = producer_index(q, QUEUE_TYPE_KERNEL);
+ }
+
+ qp->req.wqe_index = cons;
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
- for (wqe_index = consumer_index(qp->sq.queue);
- wqe_index != producer_index(qp->sq.queue);
- wqe_index = next_index(qp->sq.queue, wqe_index)) {
+ for (wqe_index = cons; wqe_index != prod;
+ wqe_index = next_index(q, wqe_index)) {
wqe = addr_from_index(qp->sq.queue, wqe_index);
mask = wr_opcode_mask(wqe->wr.opcode, qp);
@@ -104,8 +114,22 @@ void rnr_nak_timer(struct timer_list *t)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
- struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
+ struct rxe_send_wqe *wqe;
unsigned long flags;
+ struct rxe_queue *q = qp->sq.queue;
+ unsigned int index = qp->req.wqe_index;
+ unsigned int cons;
+ unsigned int prod;
+
+ if (qp->is_user) {
+ wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
+ cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
+ prod = producer_index(q, QUEUE_TYPE_FROM_USER);
+ } else {
+ wqe = queue_head(q, QUEUE_TYPE_KERNEL);
+ cons = consumer_index(q, QUEUE_TYPE_KERNEL);
+ prod = producer_index(q, QUEUE_TYPE_KERNEL);
+ }
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* check to see if we are drained;
@@ -120,8 +144,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
break;
}
- if (wqe && ((qp->req.wqe_index !=
- consumer_index(qp->sq.queue)) ||
+ if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
/* comp not done yet */
spin_unlock_irqrestore(&qp->state_lock,
@@ -144,10 +167,10 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
} while (0);
}
- if (qp->req.wqe_index == producer_index(qp->sq.queue))
+ if (index == prod)
return NULL;
- wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
+ wqe = addr_from_index(q, index);
if (unlikely((qp->req.state == QP_STATE_DRAIN ||
qp->req.state == QP_STATE_DRAINED) &&
@@ -155,7 +178,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
return NULL;
if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
- (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
+ (index != cons))) {
qp->req.wait_fence = 1;
return NULL;
}
@@ -439,7 +462,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
return skb;
}
-static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_pkt_info *pkt, struct sk_buff *skb,
int paylen)
{
@@ -464,7 +487,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else {
err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
- from_mr_obj,
+ RXE_FROM_MR_OBJ,
&crc);
if (err)
return err;
@@ -555,6 +578,60 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
jiffies + qp->qp_timeout_jiffies);
}
+static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ u8 opcode = wqe->wr.opcode;
+ struct rxe_mr *mr;
+ u32 rkey;
+ int ret;
+
+ switch (opcode) {
+ case IB_WR_LOCAL_INV:
+ rkey = wqe->wr.ex.invalidate_rkey;
+ if (rkey_is_mw(rkey))
+ ret = rxe_invalidate_mw(qp, rkey);
+ else
+ ret = rxe_invalidate_mr(qp, rkey);
+
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return ret;
+ }
+ break;
+ case IB_WR_REG_MR:
+ mr = to_rmr(wqe->wr.wr.reg.mr);
+ rxe_add_ref(mr);
+ mr->state = RXE_MR_STATE_VALID;
+ mr->access = wqe->wr.wr.reg.access;
+ mr->ibmr.lkey = wqe->wr.wr.reg.key;
+ mr->ibmr.rkey = wqe->wr.wr.reg.key;
+ mr->iova = wqe->wr.wr.reg.mr->iova;
+ rxe_drop_ref(mr);
+ break;
+ case IB_WR_BIND_MW:
+ ret = rxe_bind_mw(qp, wqe);
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_MW_BIND_ERR;
+ return ret;
+ }
+ break;
+ default:
+ pr_err("Unexpected send wqe opcode %d\n", opcode);
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return -EINVAL;
+ }
+
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+ if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+ rxe_run_task(&qp->comp.task, 1);
+
+ return 0;
+}
+
int rxe_requester(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -568,6 +645,7 @@ int rxe_requester(void *arg)
int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
+ struct rxe_queue *q = qp->sq.queue;
rxe_add_ref(qp);
@@ -576,7 +654,7 @@ next_wqe:
goto exit;
if (unlikely(qp->req.state == QP_STATE_RESET)) {
- qp->req.wqe_index = consumer_index(qp->sq.queue);
+ qp->req.wqe_index = consumer_index(q, q->type);
qp->req.opcode = -1;
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
@@ -593,43 +671,12 @@ next_wqe:
if (unlikely(!wqe))
goto exit;
- if (wqe->mask & WR_REG_MASK) {
- if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- struct rxe_mr *rmr;
-
- rmr = rxe_pool_get_index(&rxe->mr_pool,
- wqe->wr.ex.invalidate_rkey >> 8);
- if (!rmr) {
- pr_err("No mr for key %#x\n",
- wqe->wr.ex.invalidate_rkey);
- wqe->state = wqe_state_error;
- wqe->status = IB_WC_MW_BIND_ERR;
- goto exit;
- }
- rmr->state = RXE_MR_STATE_FREE;
- rxe_drop_ref(rmr);
- wqe->state = wqe_state_done;
- wqe->status = IB_WC_SUCCESS;
- } else if (wqe->wr.opcode == IB_WR_REG_MR) {
- struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
-
- rmr->state = RXE_MR_STATE_VALID;
- rmr->access = wqe->wr.wr.reg.access;
- rmr->ibmr.lkey = wqe->wr.wr.reg.key;
- rmr->ibmr.rkey = wqe->wr.wr.reg.key;
- rmr->iova = wqe->wr.wr.reg.mr->iova;
- wqe->state = wqe_state_done;
- wqe->status = IB_WC_SUCCESS;
- } else {
- goto exit;
- }
- if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- qp->sq_sig_type == IB_SIGNAL_ALL_WR)
- rxe_run_task(&qp->comp.task, 1);
- qp->req.wqe_index = next_index(qp->sq.queue,
- qp->req.wqe_index);
- goto next_wqe;
+ if (wqe->mask & WR_LOCAL_OP_MASK) {
+ ret = rxe_do_local_ops(qp, wqe);
+ if (unlikely(ret))
+ goto err;
+ else
+ goto next_wqe;
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -687,11 +734,17 @@ next_wqe:
skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
goto err;
}
- if (fill_packet(qp, wqe, &pkt, skb, payload)) {
- pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+ ret = finish_packet(qp, wqe, &pkt, skb, payload);
+ if (unlikely(ret)) {
+ pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
+ if (ret == -EFAULT)
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ else
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
kfree_skb(skb);
goto err;
}
@@ -716,6 +769,7 @@ next_wqe:
goto exit;
}
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
goto err;
}
@@ -724,7 +778,6 @@ next_wqe:
goto next_wqe;
err:
- wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
__rxe_do_task(&qp->comp.task);
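
The requester's error exit no longer applies a blanket IB_WC_LOC_PROT_ERR; each failure site now chooses its own completion status before jumping to err:. The convention, expressed as a hypothetical helper (the patch inlines this logic rather than defining such a function):

static enum ib_wc_status req_err_to_wc_status(int err)
{
	switch (err) {
	case -EFAULT:
		return IB_WC_LOC_PROT_ERR;	/* copy/DMA fault */
	default:
		return IB_WC_LOC_QP_OP_ERR;	/* other local failures */
	}
}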
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 2b220659bddb..3743dc39b60c 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -35,6 +35,7 @@ enum resp_states {
RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
RESPST_ERR_RNR,
RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_INVALIDATE_RKEY,
RESPST_ERR_LENGTH,
RESPST_ERR_CQ_OVERFLOW,
RESPST_ERROR,
@@ -68,6 +69,7 @@ static char *resp_state_name[] = {
[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
[RESPST_ERR_RNR] = "ERR_RNR",
[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
+ [RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY",
[RESPST_ERR_LENGTH] = "ERR_LENGTH",
[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
[RESPST_ERROR] = "ERROR",
@@ -293,26 +295,42 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
struct rxe_queue *q = srq->rq.queue;
struct rxe_recv_wqe *wqe;
struct ib_event ev;
+ unsigned int count;
+ size_t size;
if (srq->error)
return RESPST_ERR_RNR;
spin_lock_bh(&srq->rq.consumer_lock);
- wqe = queue_head(q);
+ if (qp->is_user)
+ wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
+ else
+ wqe = queue_head(q, QUEUE_TYPE_KERNEL);
if (!wqe) {
spin_unlock_bh(&srq->rq.consumer_lock);
return RESPST_ERR_RNR;
}
- /* note kernel and user space recv wqes have same size */
- memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
+ /* don't trust user space data */
+ if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
+ return RESPST_ERR_MALFORMED_WQE;
+ }
+ size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
+ memcpy(&qp->resp.srq_wqe, wqe, size);
qp->resp.wqe = &qp->resp.srq_wqe.wqe;
- advance_consumer(q);
+ if (qp->is_user) {
+ advance_consumer(q, QUEUE_TYPE_FROM_USER);
+ count = queue_count(q, QUEUE_TYPE_FROM_USER);
+ } else {
+ advance_consumer(q, QUEUE_TYPE_KERNEL);
+ count = queue_count(q, QUEUE_TYPE_KERNEL);
+ }
- if (srq->limit && srq->ibsrq.event_handler &&
- (queue_count(q) < srq->limit)) {
+ if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
srq->limit = 0;
goto event;
}
@@ -339,7 +357,12 @@ static enum resp_states check_resource(struct rxe_qp *qp,
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
} else if (!srq) {
- qp->resp.wqe = queue_head(qp->rq.queue);
+ if (qp->is_user)
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_USER);
+ else
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_KERNEL);
if (qp->resp.wqe) {
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
@@ -366,7 +389,12 @@ static enum resp_states check_resource(struct rxe_qp *qp,
if (srq)
return get_srq_wqe(qp);
- qp->resp.wqe = queue_head(qp->rq.queue);
+ if (qp->is_user)
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_USER);
+ else
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_KERNEL);
return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
}
@@ -392,6 +420,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
struct rxe_mr *mr = NULL;
+ struct rxe_mw *mw = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -403,6 +432,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
if (pkt->mask & RXE_RETH_MASK) {
qp->resp.va = reth_va(pkt);
+ qp->resp.offset = 0;
qp->resp.rkey = reth_rkey(pkt);
qp->resp.resid = reth_len(pkt);
qp->resp.length = reth_len(pkt);
@@ -411,6 +441,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
: IB_ACCESS_REMOTE_WRITE;
} else if (pkt->mask & RXE_ATOMIC_MASK) {
qp->resp.va = atmeth_va(pkt);
+ qp->resp.offset = 0;
qp->resp.rkey = atmeth_rkey(pkt);
qp->resp.resid = sizeof(u64);
access = IB_ACCESS_REMOTE_ATOMIC;
@@ -430,18 +461,36 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
resid = qp->resp.resid;
pktlen = payload_size(pkt);
- mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
- if (!mr) {
- state = RESPST_ERR_RKEY_VIOLATION;
- goto err;
- }
+ if (rkey_is_mw(rkey)) {
+ mw = rxe_lookup_mw(qp, access, rkey);
+ if (!mw) {
+ pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
- if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
- state = RESPST_ERR_RKEY_VIOLATION;
- goto err;
+ mr = mw->mr;
+ if (!mr) {
+ pr_err("%s: MW doesn't have an MR\n", __func__);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
+
+ if (mw->access & IB_ZERO_BASED)
+ qp->resp.offset = mw->addr;
+
+ rxe_drop_ref(mw);
+ rxe_add_ref(mr);
+ } else {
+ mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
+ if (!mr) {
+ pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err;
+ }
}
- if (mr_check_range(mr, va, resid)) {
+ if (mr_check_range(mr, va + qp->resp.offset, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -475,6 +524,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
err:
if (mr)
rxe_drop_ref(mr);
+ if (mw)
+ rxe_drop_ref(mw);
+
return state;
}
@@ -484,7 +536,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
- data_addr, data_len, to_mr_obj, NULL);
+ data_addr, data_len, RXE_TO_MR_OBJ, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE;
@@ -499,8 +551,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int err;
int data_len = payload_size(pkt);
- err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
- to_mr_obj, NULL);
+ err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
+ payload_addr(pkt), data_len, RXE_TO_MR_OBJ, NULL);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -519,7 +571,6 @@ static DEFINE_SPINLOCK(atomic_ops_lock);
static enum resp_states process_atomic(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
- u64 iova = atmeth_va(pkt);
u64 *vaddr;
enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
@@ -529,7 +580,7 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
goto out;
}
- vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
+ vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
/* check vaddr is 8 bytes aligned. */
if (!vaddr || (uintptr_t)vaddr & 7) {
@@ -587,18 +638,11 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
ack->opcode = opcode;
ack->mask = rxe_opcode[opcode].mask;
ack->paylen = paylen;
-
- /* fill in bth using the request packet headers */
- memcpy(ack->hdr, pkt->hdr, RXE_BTH_BYTES);
-
- bth_set_opcode(ack, opcode);
- bth_set_qpn(ack, qp->attr.dest_qp_num);
- bth_set_pad(ack, pad);
- bth_set_se(ack, 0);
- bth_set_psn(ack, psn);
- bth_set_ack(ack, 0);
ack->psn = psn;
+ bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
+ qp->attr.dest_qp_num, 0, psn);
+
if (ack->mask & RXE_AETH_MASK) {
aeth_set_syn(ack, syndrome);
aeth_set_msn(ack, qp->resp.msn);
@@ -653,8 +697,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
res->type = RXE_READ_MASK;
res->replay = 0;
- res->read.va = qp->resp.va;
- res->read.va_org = qp->resp.va;
+ res->read.va = qp->resp.va +
+ qp->resp.offset;
+ res->read.va_org = qp->resp.va +
+ qp->resp.offset;
res->first_psn = req_pkt->psn;
@@ -701,7 +747,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
return RESPST_ERR_RNR;
err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
- payload, from_mr_obj, &icrc);
+ payload, RXE_FROM_MR_OBJ, &icrc);
if (err)
pr_err("Failed copying memory\n");
@@ -739,16 +785,12 @@ static enum resp_states read_reply(struct rxe_qp *qp,
return state;
}
-static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
- struct rxe_pkt_info *pkt)
+static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
- struct sk_buff *skb = PKT_TO_SKB(pkt);
-
- memset(hdr, 0, sizeof(*hdr));
- if (skb->protocol == htons(ETH_P_IP))
- memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
- else if (skb->protocol == htons(ETH_P_IPV6))
- memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+ if (rkey_is_mw(rkey))
+ return rxe_invalidate_mw(qp, rkey);
+ else
+ return rxe_invalidate_mr(qp, rkey);
}
/* Executes a new request. A retried request never reaches this function (send
@@ -757,16 +799,23 @@ static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
enum resp_states err;
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ union rdma_network_hdr hdr;
if (pkt->mask & RXE_SEND_MASK) {
if (qp_type(qp) == IB_QPT_UD ||
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
- union rdma_network_hdr hdr;
-
- build_rdma_network_hdr(&hdr, pkt);
-
- err = send_data_in(qp, &hdr, sizeof(hdr));
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(&hdr.reserved, 0,
+ sizeof(hdr.reserved));
+ memcpy(&hdr.roce4grh, ip_hdr(skb),
+ sizeof(hdr.roce4grh));
+ err = send_data_in(qp, &hdr, sizeof(hdr));
+ } else {
+ err = send_data_in(qp, ipv6_hdr(skb),
+ sizeof(hdr));
+ }
if (err)
return err;
}
@@ -790,6 +839,14 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
WARN_ON_ONCE(1);
}
+ if (pkt->mask & RXE_IETH_MASK) {
+ u32 rkey = ieth_rkey(pkt);
+
+ err = invalidate_rkey(qp, rkey);
+ if (err)
+ return RESPST_ERR_INVALIDATE_RKEY;
+ }
+
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
qp->resp.ack_psn = qp->resp.psn;
@@ -822,13 +879,13 @@ static enum resp_states do_complete(struct rxe_qp *qp,
memset(&cqe, 0, sizeof(cqe));
if (qp->rcq->is_user) {
- uwc->status = qp->resp.status;
- uwc->qp_num = qp->ibqp.qp_num;
- uwc->wr_id = wqe->wr_id;
+ uwc->status = qp->resp.status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ uwc->wr_id = wqe->wr_id;
} else {
- wc->status = qp->resp.status;
- wc->qp = &qp->ibqp;
- wc->wr_id = wqe->wr_id;
+ wc->status = qp->resp.status;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wqe->wr_id;
}
if (wc->status == IB_WC_SUCCESS) {
@@ -883,34 +940,25 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
if (pkt->mask & RXE_IETH_MASK) {
- struct rxe_mr *rmr;
-
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = ieth_rkey(pkt);
-
- rmr = rxe_pool_get_index(&rxe->mr_pool,
- wc->ex.invalidate_rkey >> 8);
- if (unlikely(!rmr)) {
- pr_err("Bad rkey %#x invalidation\n",
- wc->ex.invalidate_rkey);
- return RESPST_ERROR;
- }
- rmr->state = RXE_MR_STATE_FREE;
- rxe_drop_ref(rmr);
}
- wc->qp = &qp->ibqp;
-
if (pkt->mask & RXE_DETH_MASK)
wc->src_qp = deth_sqp(pkt);
+ wc->qp = &qp->ibqp;
wc->port_num = qp->attr.port_num;
}
}
/* have copy for srq and reference for !srq */
- if (!qp->srq)
- advance_consumer(qp->rq.queue);
+ if (!qp->srq) {
+ if (qp->is_user)
+ advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER);
+ else
+ advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL);
+ }
qp->resp.wqe = NULL;
@@ -966,16 +1014,10 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
goto out;
}
- rxe_add_ref(qp);
-
res = &qp->resp.resources[qp->resp.res_head];
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
- memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
- memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
- sizeof(skb->cb) - sizeof(ack_pkt));
-
skb_get(skb);
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
@@ -1176,6 +1218,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
+ struct rxe_queue *q = qp->rq.queue;
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
@@ -1186,8 +1229,8 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
if (notify)
return;
- while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
- advance_consumer(qp->rq.queue);
+ while (!qp->srq && q && queue_head(q, q->type))
+ advance_consumer(q, q->type);
}
int rxe_responder(void *arg)
@@ -1314,6 +1357,13 @@ int rxe_responder(void *arg)
}
break;
+ case RESPST_ERR_INVALIDATE_RKEY:
+ /* RC - Class J. */
+ qp->resp.goto_error = 1;
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ state = RESPST_COMPLETE;
+ break;
+
case RESPST_ERR_LENGTH:
if (qp_type(qp) == IB_QPT_RC) {
/* Class C */
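
A worked example of the new qp->resp.offset handling, with made-up numbers: suppose a type 2 MW is bound with IB_ZERO_BASED at addr 0x7f0000001000 and the peer issues an RDMA WRITE carrying reth_va = 0x200. The responder resolves the zero-based VA like this (sketch only):

/* hypothetical values for illustration */
static u64 example_resp_iova(void)
{
	u64 va     = 0x200;		/* zero-based VA from the RETH */
	u64 offset = 0x7f0000001000;	/* mw->addr, since IB_ZERO_BASED */

	return va + offset;	/* 0x7f0000001200, then range-checked
				 * by mr_check_range() */
}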
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 41b0d1e11baf..610c98d24b5c 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -78,6 +78,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
int err;
int srq_wqe_size;
struct rxe_queue *q;
+ enum queue_type type;
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
@@ -85,14 +86,16 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->srq_num = srq->pelem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
+ srq->rq.is_user = srq->is_user;
srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
+ type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
q = rxe_queue_init(rxe, &srq->rq.max_wr,
- srq_wqe_size);
+ srq_wqe_size, type);
if (!q) {
pr_warn("unable to allocate queue for srq\n");
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index aeb5e232c195..c223959ac174 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -216,8 +216,14 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
u32 length;
struct rxe_recv_wqe *recv_wqe;
int num_sge = ibwr->num_sge;
+ int full;
- if (unlikely(queue_full(rq->queue))) {
+ if (rq->is_user)
+ full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);
+
+ if (unlikely(full)) {
err = -ENOMEM;
goto err1;
}
@@ -231,7 +237,11 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- recv_wqe = producer_addr(rq->queue);
+ if (rq->is_user)
+ recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);
+
recv_wqe->wr_id = ibwr->wr_id;
recv_wqe->num_sge = num_sge;
@@ -244,7 +254,11 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- advance_producer(rq->queue);
+ if (rq->is_user)
+ advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ advance_producer(rq->queue, QUEUE_TYPE_KERNEL);
+
return 0;
err1:
@@ -267,6 +281,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
uresp = udata->outbuf;
+ srq->is_user = true;
+ } else {
+ srq->is_user = false;
}
err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
@@ -408,7 +425,9 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
err = -EINVAL;
goto err2;
}
- qp->is_user = 1;
+ qp->is_user = true;
+ } else {
+ qp->is_user = false;
}
rxe_add_index(qp);
@@ -577,7 +596,7 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
init_send_wr(qp, &wqe->wr, ibwr);
/* local operation */
- if (unlikely(mask & WR_REG_MASK)) {
+ if (unlikely(mask & WR_LOCAL_OP_MASK)) {
wqe->mask = mask;
wqe->state = wqe_state_posted;
return;
@@ -613,6 +632,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
unsigned long flags;
+ int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
@@ -620,22 +640,31 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags);
- if (unlikely(queue_full(sq->queue))) {
- err = -ENOMEM;
- goto err1;
+ if (qp->is_user)
+ full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);
+
+ if (unlikely(full)) {
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ return -ENOMEM;
}
- send_wqe = producer_addr(sq->queue);
+ if (qp->is_user)
+ send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);
+
init_send_wqe(qp, ibwr, mask, length, send_wqe);
- advance_producer(sq->queue);
+ if (qp->is_user)
+ advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
+ else
+ advance_producer(sq->queue, QUEUE_TYPE_KERNEL);
+
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return 0;
-
-err1:
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
- return err;
}
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
@@ -823,12 +852,18 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
- cqe = queue_head(cq->queue);
+ if (cq->is_user)
+ cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
if (!cqe)
break;
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- advance_consumer(cq->queue);
+ if (cq->is_user)
+ advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -838,7 +873,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
struct rxe_cq *cq = to_rcq(ibcq);
- int count = queue_count(cq->queue);
+ int count;
+
+ if (cq->is_user)
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
return (count > wc_cnt) ? wc_cnt : count;
}
@@ -848,12 +888,18 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
struct rxe_cq *cq = to_rcq(ibcq);
unsigned long irq_flags;
int ret = 0;
+ int empty;
spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+ if (cq->is_user)
+ empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
+ else
+ empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
@@ -899,7 +945,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
+ err = rxe_mr_init_user(pd, start, length, iova, access, mr);
if (err)
goto err3;
@@ -913,17 +959,6 @@ err2:
return ERR_PTR(err);
}
-static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
-
- mr->state = RXE_MR_STATE_ZOMBIE;
- rxe_drop_ref(mr_pd(mr));
- rxe_drop_index(mr);
- rxe_drop_ref(mr);
- return 0;
-}
-
static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
@@ -1058,8 +1093,9 @@ static const struct ib_device_ops rxe_dev_ops = {
.driver_id = RDMA_DRIVER_RXE,
.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
- .alloc_hw_stats = rxe_ib_alloc_hw_stats,
+ .alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
.alloc_mr = rxe_alloc_mr,
+ .alloc_mw = rxe_alloc_mw,
.alloc_pd = rxe_alloc_pd,
.alloc_ucontext = rxe_alloc_ucontext,
.attach_mcast = rxe_attach_mcast,
@@ -1069,6 +1105,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.create_srq = rxe_create_srq,
.create_user_ah = rxe_create_ah,
.dealloc_driver = rxe_dealloc,
+ .dealloc_mw = rxe_dealloc_mw,
.dealloc_pd = rxe_dealloc_pd,
.dealloc_ucontext = rxe_dealloc_ucontext,
.dereg_mr = rxe_dereg_mr,
@@ -1077,6 +1114,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.destroy_qp = rxe_destroy_qp,
.destroy_srq = rxe_destroy_srq,
.detach_mcast = rxe_detach_mcast,
+ .device_group = &rxe_attr_group,
.enable_driver = rxe_enable_driver,
.get_dma_mr = rxe_get_dma_mr,
.get_hw_stats = rxe_ib_get_hw_stats,
@@ -1143,7 +1181,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
}
rxe->tfm = tfm;
- rdma_set_device_sysfs_group(dev, &rxe_attr_group);
err = ib_register_device(dev, ibdev_name, NULL);
if (err)
pr_warn("%s failed with error %d\n", __func__, err);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 11eba7a3ba8f..959a3260fcab 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -77,6 +77,7 @@ enum wqe_state {
};
struct rxe_sq {
+ bool is_user;
int max_wr;
int max_sge;
int max_inline;
@@ -85,6 +86,7 @@ struct rxe_sq {
};
struct rxe_rq {
+ bool is_user;
int max_wr;
int max_sge;
spinlock_t producer_lock; /* guard queue producer */
@@ -98,6 +100,7 @@ struct rxe_srq {
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
+ bool is_user;
int limit;
int error;
@@ -183,6 +186,7 @@ struct rxe_resp_info {
/* RDMA read / atomic only */
u64 va;
+ u64 offset;
struct rxe_mr *mr;
u32 resid;
u32 rkey;
@@ -211,7 +215,7 @@ struct rxe_qp {
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
- int is_user;
+ bool is_user;
struct rxe_pd *pd;
struct rxe_srq *srq;
@@ -273,7 +277,16 @@ enum rxe_mr_type {
RXE_MR_TYPE_NONE,
RXE_MR_TYPE_DMA,
RXE_MR_TYPE_MR,
- RXE_MR_TYPE_MW,
+};
+
+enum rxe_mr_copy_dir {
+ RXE_TO_MR_OBJ,
+ RXE_FROM_MR_OBJ,
+};
+
+enum rxe_mr_lookup_type {
+ RXE_LOOKUP_LOCAL,
+ RXE_LOOKUP_REMOTE,
};
#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -287,6 +300,13 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
+static inline int rkey_is_mw(u32 rkey)
+{
+ u32 index = rkey >> 8;
+
+ return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
+}
+
struct rxe_mr {
struct rxe_pool_entry pelem;
struct ib_mr ibmr;
@@ -312,18 +332,27 @@ struct rxe_mr {
u32 max_buf;
u32 num_map;
+ atomic_t num_mw;
+
struct rxe_map **map;
};
enum rxe_mw_state {
- RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
- RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
- RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
+ RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
+ RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
+ RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};
struct rxe_mw {
- struct ib_mw ibmw;
- struct rxe_pool_entry pelem;
+ struct ib_mw ibmw;
+ struct rxe_pool_entry pelem;
+ spinlock_t lock;
+ enum rxe_mw_state state;
+ struct rxe_qp *qp; /* Type 2 only */
+ struct rxe_mr *mr;
+ int access;
+ u64 addr;
+ u64 length;
};
struct rxe_mc_grp {
@@ -455,6 +484,16 @@ static inline u32 mr_rkey(struct rxe_mr *mr)
return mr->ibmr.rkey;
}
+static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+{
+ return to_rpd(mw->ibmw.pd);
+}
+
+static inline u32 rxe_mw_rkey(struct rxe_mw *mw)
+{
+ return mw->ibmw.rkey;
+}
+
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
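
The rkey layout makes rkey_is_mw() cheap: the low 8 bits are the consumer-owned key byte and the upper 24 bits are the pool index, so telling MRs from MWs is a single range check against the index windows defined in rxe_param.h. Two made-up rkeys as a worked example:

/* illustrative values only */
static void rkey_example(void)
{
	u32 mr_rkey = 0x00000342;	/* index 0x000003: MR window */
	u32 mw_rkey = 0x01000242;	/* index 0x010002: MW window */

	WARN_ON(rkey_is_mw(mr_rkey));	/* 0x3 < RXE_MIN_MW_INDEX */
	WARN_ON(!rkey_is_mw(mw_rkey));	/* 0x10001 <= 0x10002 <= 0x20000 */
}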
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 75cd44789661..44d8d151ff90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -454,7 +454,7 @@ struct ipoib_neigh {
struct list_head list;
struct ipoib_neigh __rcu *hnext;
struct rcu_head rcu;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned long alive;
};
@@ -464,7 +464,7 @@ struct ipoib_neigh {
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
- if (atomic_dec_and_test(&neigh->refcnt))
+ if (refcount_dec_and_test(&neigh->refcnt))
ipoib_neigh_dtor(neigh);
}
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
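
The atomic_t to refcount_t swap changes no logic in the normal case; what it buys is checking, since refcount_t saturates and warns on over/underflow instead of silently wrapping. A minimal sketch of the lookup-side pattern this file relies on (the helper name is made up):

static struct ipoib_neigh *neigh_try_get(struct ipoib_neigh *neigh)
{
	/* fails once refcnt has hit zero, rather than resurrecting a
	 * dying object the way a bare atomic_inc() silently would */
	if (!refcount_inc_not_zero(&neigh->refcnt))
		return NULL;
	return neigh;
}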
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 9dbc85a6b702..684c2ddb16f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1503,7 +1503,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
}
-static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+static ssize_t mode_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
@@ -1515,8 +1515,8 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
return sysfs_emit(buf, "datagram\n");
}
-static ssize_t set_mode(struct device *d, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t mode_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct net_device *dev = to_net_dev(d);
int ret;
@@ -1542,7 +1542,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
return (!ret || ret == -EBUSY) ? count : ret;
}
-static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+static DEVICE_ATTR_RW(mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
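
The rename from show_mode()/set_mode() is what makes the macro swap legal: DEVICE_ATTR_RW(mode) expands, roughly, to the declaration below, so it requires handlers named <attr>_show and <attr>_store:

/* approximate expansion of DEVICE_ATTR_RW(mode) */
static struct device_attribute dev_attr_mode =
	__ATTR(mode, 0644, mode_show, mode_store);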
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bbb18087fdab..abf60f4d9203 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -316,7 +316,7 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
return false;
}
-/**
+/*
* Find the master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
@@ -361,8 +361,9 @@ static int ipoib_upper_walk(struct net_device *upper,
}
/**
- * Find a net_device matching the given address, which is an upper device of
- * the given net_device.
+ * ipoib_get_net_dev_match_addr - Find a net_device matching
+ * the given address, which is an upper device of the given net_device.
+ *
* @addr: IP address to look for.
* @dev: base IPoIB net_device
*
@@ -1287,7 +1288,7 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = rcu_dereference_bh(neigh->hnext)) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
- if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
goto out_unlock;
@@ -1382,7 +1383,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
INIT_LIST_HEAD(&neigh->list);
ipoib_cm_set(neigh, NULL);
/* one ref on behalf of the caller */
- atomic_set(&neigh->refcnt, 1);
+ refcount_set(&neigh->refcnt, 1);
return neigh;
}
@@ -1414,7 +1415,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
lockdep_is_held(&priv->lock))) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
- if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
break;
@@ -1429,7 +1430,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
goto out_unlock;
/* one ref on behalf of the hash table */
- atomic_inc(&neigh->refcnt);
+ refcount_inc(&neigh->refcnt);
neigh->alive = jiffies;
/* put in hash */
rcu_assign_pointer(neigh->hnext,
@@ -2268,18 +2269,18 @@ void ipoib_intf_free(struct net_device *dev)
kfree(priv);
}
-static ssize_t show_pkey(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
return sysfs_emit(buf, "0x%04x\n", priv->pkey);
}
-static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+static DEVICE_ATTR_RO(pkey);
-static ssize_t show_umcast(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
@@ -2300,9 +2301,8 @@ void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
-static ssize_t set_umcast(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
@@ -2310,7 +2310,7 @@ static ssize_t set_umcast(struct device *dev,
return count;
}
-static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
+static DEVICE_ATTR_RW(umcast);
int ipoib_add_umcast_attr(struct net_device *dev)
{
@@ -2381,9 +2381,9 @@ static int ipoib_set_mac(struct net_device *dev, void *addr)
return 0;
}
-static ssize_t create_child(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t create_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int pkey;
int ret;
@@ -2398,11 +2398,11 @@ static ssize_t create_child(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
+static DEVICE_ATTR_WO(create_child);
-static ssize_t delete_child(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t delete_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int pkey;
int ret;
@@ -2418,7 +2418,7 @@ static ssize_t delete_child(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
+static DEVICE_ATTR_WO(delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 5958840dbeed..0322dc75396f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -40,7 +40,7 @@
#include "ipoib.h"
-static ssize_t show_parent(struct device *d, struct device_attribute *attr,
+static ssize_t parent_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
@@ -48,7 +48,7 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", priv->parent->name);
}
-static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
+static DEVICE_ATTR_RO(parent);
static bool is_child_unique(struct ipoib_dev_priv *ppriv,
struct ipoib_dev_priv *priv)
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 136f6c4492e0..b44cbb8e84eb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -761,7 +761,7 @@ void iser_conn_init(struct iser_conn *iser_conn)
ib_conn->reg_cqe.done = iser_reg_comp;
}
- /**
+/*
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 18266f07c58d..636d590765f9 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -35,10 +35,10 @@ static const struct kernel_param_ops sg_tablesize_ops = {
.get = param_get_int,
};
-static int isert_sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE;
+static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
- "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 256, max: 4096)");
+ "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
@@ -2231,6 +2231,16 @@ isert_setup_id(struct isert_np *isert_np)
}
isert_dbg("id %p context %p\n", id, id->context);
+ /*
+ * Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+ ret = rdma_set_afonly(id, 1);
+ if (ret) {
+ isert_err("rdma_set_afonly() failed: %d\n", ret);
+ goto out_id;
+ }
+
ret = rdma_bind_addr(id, sa);
if (ret) {
isert_err("rdma_bind_addr() failed: %d\n", ret);
@@ -2387,10 +2397,10 @@ accept_wait:
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("np_thread_state %d\n",
np->np_thread_state);
- /**
+ /*
* No point in stalling here when np_thread
* is in state RESET/SHUTDOWN/EXIT - bail
- **/
+ */
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
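
On the rdma_set_afonly() addition above: without it, binding the IPv6 listener also claims the IPv4 side of the port, so a second IPv4 listener on the same port fails with -EADDRINUSE. A user-space socket analogue of the same idea (sketch, not from the patch):

#include <netinet/in.h>
#include <sys/socket.h>

/* illustrative analogue of rdma_set_afonly(id, 1) */
static int bind_v6_only(int fd6, const struct sockaddr_in6 *sin6)
{
	int on = 1;

	/* scope the bind to IPv6 so an IPv4 listener can share the port */
	if (setsockopt(fd6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)))
		return -1;
	return bind(fd6, (const struct sockaddr *)sin6, sizeof(*sin6));
}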
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6c5af13db4e0..ca8cfebe26ca 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -65,9 +65,6 @@
*/
#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
-/* Default I/O size is 1MB */
-#define ISCSI_ISER_DEF_SG_TABLESIZE 256
-
/* Minimum I/O size is 512KB */
#define ISCSI_ISER_MIN_SG_TABLESIZE 128
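
For scale: assuming 4 KiB pages, the new default of ISCSI_ISER_MIN_SG_TABLESIZE (128 entries) caps a single command at 128 * 4 KiB = 512 KiB, matching the "Minimum I/O size is 512KB" comment, while the removed 256-entry default corresponded to the old 1 MB default I/O size.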
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 7d53d18a5004..4ee592ccf979 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -250,7 +250,6 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
const char *buf, size_t count)
{
struct rtrs_clt_sess *sess;
- int ret;
sess = container_of(kobj, struct rtrs_clt_sess, kobj);
if (!sysfs_streq(buf, "1")) {
@@ -258,9 +257,7 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_disconnect_from_sysfs(sess);
- if (ret)
- return ret;
+ rtrs_clt_close_conns(sess, true);
return count;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 0a794d748a7a..f2c40e50f25e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -32,6 +32,8 @@
#define RTRS_RECONNECT_SEED 8
#define FIRST_CONN 0x01
+/* limit to 128 * 4k = 512k max IO */
+#define RTRS_MAX_SEGMENTS 128
MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");
@@ -412,6 +414,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->inv_errno = errno;
}
+ refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (unlikely(err)) {
rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
@@ -427,10 +430,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
return;
}
+ if (!refcount_dec_and_test(&req->ref))
+ return;
}
ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
+ if (!refcount_dec_and_test(&req->ref))
+ return;
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
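
The two hunks above split request teardown across the send completion and the LOCAL_INV completion; whichever fires last releases the request. A condensed sketch of that pattern (rtrs_clt_finish_req() is a hypothetical stand-in for the unmap/notify tail above):

    refcount_set(&req->ref, 1);           /* taken at init, see rtrs_clt_init_req() */
    refcount_inc(&req->ref);              /* extra ref before posting the INV WR    */
    /* both completion paths then end with: */
    if (refcount_dec_and_test(&req->ref))
            rtrs_clt_finish_req(req);     /* hypothetical: unmap sg, dec inflight   */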
@@ -438,10 +445,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.sess,
- "IO request failed: error=%d path=%s [%s:%u]\n",
+ rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
errno, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ sess->hca_port, notify);
}
if (notify)
@@ -480,7 +486,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
rbuf->rkey, rbuf->addr + off,
- imm, flags, wr);
+ imm, flags, wr, NULL);
}
static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
@@ -655,7 +661,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
- break;
}
break;
case IB_WC_RECV:
@@ -814,6 +819,9 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
int inflight;
list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
continue;
@@ -913,7 +921,7 @@ static inline void path_it_deinit(struct path_it *it)
}
/**
- * rtrs_clt_init_req() Initialize an rtrs_clt_io_req holding information
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
* about an inflight IO.
* The user buffer holding user control message (not data) is copied into
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
@@ -954,6 +962,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
+ refcount_set(&req->ref, 1);
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
@@ -997,9 +1006,10 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
}
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
- struct rtrs_clt_io_req *req,
- struct rtrs_rbuf *rbuf,
- u32 size, u32 imm)
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, bool fr_en,
+ u32 size, u32 imm, struct ib_send_wr *wr,
+ struct ib_send_wr *tail)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct ib_sge *sge = req->sge;
@@ -1007,18 +1017,28 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct scatterlist *sg;
size_t num_sge;
int i;
-
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
- sge[i].addr = sg_dma_address(sg);
- sge[i].length = sg_dma_len(sg);
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ struct ib_send_wr *ptail = NULL;
+
+ if (fr_en) {
+ i = 0;
+ sge[i].addr = req->mr->iova;
+ sge[i].length = req->mr->length;
+ sge[i].lkey = req->mr->lkey;
+ i++;
+ num_sge = 2;
+ ptail = tail;
+ } else {
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ num_sge = 1 + req->sg_cnt;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
- num_sge = 1 + req->sg_cnt;
-
/*
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
@@ -1031,7 +1051,22 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
rbuf->rkey, rbuf->addr, imm,
- flags, NULL);
+ flags, wr, ptail);
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
}
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
@@ -1044,6 +1079,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
struct rtrs_rbuf *rbuf;
int ret, count = 0;
u32 imm, buf_id;
+ struct ib_reg_wr rwr;
+ struct ib_send_wr inv_wr;
+ struct ib_send_wr *wr = NULL;
+ bool fr_en = false;
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
@@ -1072,15 +1111,43 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->sg_size = tsize;
rbuf = &sess->rbufs[buf_id];
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Write request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ return ret;
+ }
+ inv_wr = (struct ib_send_wr) {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE),
+ };
+ wr = &rwr.wr;
+ fr_en = true;
+ refcount_inc(&req->ref);
+ }
/*
* Update stats now, after request is successfully sent it is not
* safe anymore to touch it.
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
- req->usr_len + sizeof(*msg),
- imm);
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ req->usr_len + sizeof(*msg),
+ imm, wr, &inv_wr);
if (unlikely(ret)) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
@@ -1096,21 +1163,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
return ret;
}
-static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
-{
- int nr;
-
- /* Align the MR to a 4K page size to match the block virt boundary */
- nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
- if (nr < 0)
- return nr;
- if (unlikely(nr < req->sg_cnt))
- return -EINVAL;
- ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
-
- return nr;
-}
-
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
@@ -1219,7 +1271,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
}
/**
- * rtrs_clt_failover_req() Try to find an active path for a failed request
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
* @clt: clt context
* @fail_req: a failed io request.
*/
@@ -1305,7 +1357,6 @@ static void free_sess_reqs(struct rtrs_clt_sess *sess)
static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
{
struct rtrs_clt_io_req *req;
- struct rtrs_clt *clt = sess->clt;
int i, err = -ENOMEM;
sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
@@ -1322,8 +1373,7 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
if (!req->iu)
goto out;
- req->sge = kmalloc_array(clt->max_segments + 1,
- sizeof(*req->sge), GFP_KERNEL);
+ req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
if (!req->sge)
goto out;
@@ -1415,7 +1465,8 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
sess->max_pages_per_mr =
min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
- sess->max_send_sge = ib_dev->attrs.max_send_sge;
+ sess->clt->max_segments =
+ min(sess->max_pages_per_mr, sess->clt->max_segments);
}
static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
@@ -1449,23 +1500,12 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
rtrs_wq);
}
-static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
-{
- rtrs_start_hb(&sess->s);
-}
-
-static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
-{
- rtrs_stop_hb(&sess->s);
-}
-
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
- const struct rtrs_addr *path,
- size_t con_num, u16 max_segments,
- u32 nr_poll_queues)
+ const struct rtrs_addr *path,
+ size_t con_num, u32 nr_poll_queues)
{
struct rtrs_clt_sess *sess;
int err = -ENOMEM;
@@ -1505,9 +1545,9 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
if (path->src)
memcpy(&sess->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
- strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
sess->clt = clt;
- sess->max_pages_per_mr = max_segments;
+ sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
init_waitqueue_head(&sess->state_wq);
sess->state = RTRS_CLT_CONNECTING;
atomic_set(&sess->connected_cnt, 0);
@@ -1581,20 +1621,13 @@ static void destroy_con(struct rtrs_clt_con *con)
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- u32 max_send_wr, max_recv_wr, cq_size;
+ u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
lockdep_assert_held(&con->con_mutex);
if (con->c.cid == 0) {
- /*
- * One completion for each receive and two for each send
- * (send request + registration)
- * + 2 for drain and heartbeat
- * in case qp gets into error state
- */
- max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
- max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ max_send_sge = 1;
/* We must be the first here */
if (WARN_ON(sess->s.dev))
return -EINVAL;
@@ -1613,6 +1646,17 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
}
sess->s.dev_ref = 1;
query_fast_reg_mode(sess);
+ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ /*
+ * Two completions (request + registration) for each send,
+ * two for each recv if always_invalidate is set on the server,
+ * or one for each recv otherwise,
+ * + 2 for drain and heartbeat
+ * in case the qp gets into an error state.
+ */
+ max_send_wr =
+ min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
} else {
/*
* Here we assume that session members are correctly set.
@@ -1624,35 +1668,36 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
if (WARN_ON(!sess->queue_depth))
return -EINVAL;
+ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
sess->s.dev_ref++;
- max_send_wr =
- min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
sess->queue_depth * 3 + 1);
- max_recv_wr =
- min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ max_recv_wr = min_t(int, wr_limit,
sess->queue_depth * 3 + 1);
+ max_send_sge = 2;
}
+ cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
- con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
+ con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
return -ENOMEM;
- con->queue_size = max_recv_wr;
+ con->queue_num = cq_num;
}
- cq_size = max_send_wr + max_recv_wr;
+ cq_num = max_send_wr + max_recv_wr;
cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
if (con->c.cid >= sess->s.irq_con_num)
- err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, cq_size, max_send_wr,
+ err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
- err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, cq_size, max_send_wr,
+ err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
* In case of error we do not bother to clean previous allocations,
@@ -1672,9 +1717,9 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
+ rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
con->rsp_ius = NULL;
- con->queue_size = 0;
+ con->queue_num = 0;
}
if (sess->s.dev_ref && !--sess->s.dev_ref) {
rtrs_ib_dev_put(sess->s.dev);
@@ -1783,12 +1828,19 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
- rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
- queue_depth);
+ if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
+ rtrs_err(clt, "Error: queue depth changed\n");
+
+ /*
+ * Stop any more reconnection attempts
+ */
+ sess->reconnect_attempts = -1;
+ rtrs_err(clt,
+ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
- if (!sess->rbufs || sess->queue_depth < queue_depth) {
+
+ if (!sess->rbufs) {
kfree(sess->rbufs);
sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
GFP_KERNEL);
@@ -1802,7 +1854,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
/*
- * Global queue depth and IO size is always a minimum.
+ * Global IO size is always a minimum.
* If while a reconnection server sends us a value a bit
* higher - client does not care and uses cached minimum.
*
@@ -1810,8 +1862,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
* connections in parallel, use lock.
*/
mutex_lock(&clt->paths_mutex);
- clt->queue_depth = min_not_zero(sess->queue_depth,
- clt->queue_depth);
+ clt->queue_depth = sess->queue_depth;
clt->max_io_size = min_not_zero(sess->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
@@ -1869,7 +1920,7 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
return -ECONNRESET;
}
-static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
{
if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &sess->close_work);
@@ -2098,7 +2149,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
*/
synchronize_rcu();
- rtrs_clt_stop_hb(sess);
+ rtrs_stop_hb(&sess->s);
/*
* The order is utterly crucial: firstly disconnect and complete all
@@ -2291,7 +2342,7 @@ static int init_conns(struct rtrs_clt_sess *sess)
if (err)
goto destroy;
- rtrs_clt_start_hb(sess);
+ rtrs_start_hb(&sess->s);
return 0;
@@ -2465,7 +2516,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
- rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+ rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
sess->s.dev->ib_dev, DMA_TO_DEVICE,
@@ -2617,7 +2668,6 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
- unsigned int max_segments,
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
@@ -2646,13 +2696,13 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
clt->paths_up = MAX_PATHS_NUM;
clt->port = port;
clt->pdu_sz = pdu_sz;
- clt->max_segments = max_segments;
+ clt->max_segments = RTRS_MAX_SEGMENTS;
clt->reconnect_delay_sec = reconnect_delay_sec;
clt->max_reconnect_attempts = max_reconnect_attempts;
clt->priv = priv;
clt->link_ev = link_ev;
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
- strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ strscpy(clt->sessname, sessname, sizeof(clt->sessname));
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
@@ -2715,7 +2765,6 @@ static void free_clt(struct rtrs_clt *clt)
* @port: port to be used by the RTRS session
* @pdu_sz: Size of extra payload which can be accessed after permit allocation.
* @reconnect_delay_sec: time between reconnect tries
- * @max_segments: Max. number of segments per IO request
* @max_reconnect_attempts: Number of times to reconnect on error before giving
* up, 0 for * disabled, -1 for forever
* @nr_poll_queues: number of polling mode connection using IB_POLL_DIRECT flag
@@ -2730,7 +2779,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
- u16 max_segments,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
struct rtrs_clt_sess *sess, *tmp;
@@ -2739,7 +2787,7 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
- max_segments, reconnect_delay_sec,
+ reconnect_delay_sec,
max_reconnect_attempts);
if (IS_ERR(clt)) {
err = PTR_ERR(clt);
@@ -2749,7 +2797,7 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
struct rtrs_clt_sess *sess;
sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
- max_segments, nr_poll_queues);
+ nr_poll_queues);
if (IS_ERR(sess)) {
err = PTR_ERR(sess);
goto close_all_sess;
@@ -2762,6 +2810,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
if (err) {
list_del_rcu(&sess->s.entry);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
goto close_all_sess;
}
@@ -2770,6 +2820,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
if (err) {
list_del_rcu(&sess->s.entry);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
goto close_all_sess;
}
@@ -2841,13 +2893,6 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
return err;
}
-int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
-{
- rtrs_clt_close_conns(sess, true);
-
- return 0;
-}
-
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
const struct attribute *sysfs_self)
{
@@ -3014,6 +3059,7 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
return -ECOMM;
attr->queue_depth = clt->queue_depth;
+ attr->max_segments = clt->max_segments;
/* Cap max_io_size to min of remote buffer size and the fr pages */
attr->max_io_size = min_t(int, clt->max_io_size,
clt->max_segments * SZ_4K);
@@ -3028,7 +3074,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
struct rtrs_clt_sess *sess;
int err;
- sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, 0);
+ sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
if (IS_ERR(sess))
return PTR_ERR(sess);
@@ -3052,6 +3098,8 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
close_sess:
rtrs_clt_remove_path_from_arr(sess);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
return err;
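
A worked example of the fixed sizing this file now uses: RTRS_MAX_SEGMENTS = 128 with 4 KiB pages gives the 128 * 4 KiB = 512 KiB cap from the "limit to 128 * 4k = 512k max IO" comment; rtrs_clt_query() reports the same bound via min(clt->max_io_size, clt->max_segments * SZ_4K), and because the data is now covered by a single fast-reg MR, req->sge can shrink to the two entries allocated above (one SGE for the MR, one for the immediate header).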
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 4c52f30e4da1..e276a2dfcf7c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -71,7 +71,7 @@ struct rtrs_clt_stats {
struct rtrs_clt_con {
struct rtrs_con c;
struct rtrs_iu *rsp_ius;
- u32 queue_size;
+ u32 queue_num;
unsigned int cpu;
struct mutex con_mutex;
atomic_t io_cnt;
@@ -116,6 +116,7 @@ struct rtrs_clt_io_req {
int inv_errno;
bool need_inv_comp;
bool need_inv;
+ refcount_t ref;
};
struct rtrs_rbuf {
@@ -141,7 +142,6 @@ struct rtrs_clt_sess {
u32 chunk_size;
size_t queue_depth;
u32 max_pages_per_mr;
- int max_send_sge;
u32 flags;
struct kobject kobj;
u8 for_new_clt;
@@ -202,7 +202,7 @@ static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
}
int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
-int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
+void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait);
int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
struct rtrs_addr *addr);
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 86e65cf30cab..36f184a3b676 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -47,12 +47,16 @@ enum {
MAX_PATHS_NUM = 128,
/*
- * With the size of struct rtrs_permit allocated on the client, 4K
- * is the maximum number of rtrs_permits we can allocate. This number is
- * also used on the client to allocate the IU for the user connection
- * to receive the RDMA addresses from the server.
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65534, and it depends on the system.
*/
- MAX_SESS_QUEUE_DEPTH = 4096,
+ MAX_SESS_QUEUE_DEPTH = 65535,
+ MIN_CHUNK_SIZE = 8192,
RTRS_HB_INTERVAL_MS = 5000,
RTRS_HB_MISSED_MAX = 5,
@@ -91,7 +95,7 @@ struct rtrs_con {
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
unsigned int cid;
- u16 cq_size;
+ int nr_cqe;
};
struct rtrs_sess {
@@ -290,10 +294,10 @@ struct rtrs_msg_rdma_hdr {
/* rtrs.c */
-struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_num, size_t size, gfp_t t,
struct ib_device *dev, enum dma_data_direction,
void (*done)(struct ib_cq *cq, struct ib_wc *wc));
-void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_size);
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_num);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head);
@@ -301,15 +305,16 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
struct ib_sge *sge, unsigned int num_sge,
u32 rkey, u64 rdma_addr, u32 imm_data,
enum ib_send_flags flags,
- struct ib_send_wr *head);
+ struct ib_send_wr *head,
+ struct ib_send_wr *tail);
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
u32 imm_data, enum ib_send_flags flags,
struct ib_send_wr *head);
-int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, int cq_size,
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index e102b1368d0c..12c374b5eb6e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -27,12 +27,10 @@ ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
char *page, size_t len)
{
struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
- struct rtrs_srv_sess *sess = stats->sess;
- return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
- (s64)atomic64_read(&r->dir[READ].cnt),
- (s64)atomic64_read(&r->dir[READ].size_total),
- (s64)atomic64_read(&r->dir[WRITE].cnt),
- (s64)atomic64_read(&r->dir[WRITE].size_total),
- atomic_read(&sess->ids_inflight));
+ return sysfs_emit(page, "%lld %lld %lld %lldn %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total), 0);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index a9288175fbb5..20efd44297fb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -208,6 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
device_del(&srv->dev);
put_device(&srv->dev);
} else {
+ put_device(&srv->dev);
mutex_unlock(&srv->paths_mutex);
}
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 0fa116cabc44..3df290086169 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -67,59 +67,33 @@ static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
return container_of(s, struct rtrs_srv_sess, s);
}
-static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
- enum rtrs_srv_state new_state)
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
{
enum rtrs_srv_state old_state;
bool changed = false;
- lockdep_assert_held(&sess->state_lock);
+ spin_lock_irq(&sess->state_lock);
old_state = sess->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
- switch (old_state) {
- case RTRS_SRV_CONNECTING:
+ if (old_state == RTRS_SRV_CONNECTING)
changed = true;
- fallthrough;
- default:
- break;
- }
break;
case RTRS_SRV_CLOSING:
- switch (old_state) {
- case RTRS_SRV_CONNECTING:
- case RTRS_SRV_CONNECTED:
+ if (old_state == RTRS_SRV_CONNECTING ||
+ old_state == RTRS_SRV_CONNECTED)
changed = true;
- fallthrough;
- default:
- break;
- }
break;
case RTRS_SRV_CLOSED:
- switch (old_state) {
- case RTRS_SRV_CLOSING:
+ if (old_state == RTRS_SRV_CLOSING)
changed = true;
- fallthrough;
- default:
- break;
- }
break;
default:
break;
}
if (changed)
sess->state = new_state;
-
- return changed;
-}
-
-static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
- enum rtrs_srv_state new_state)
-{
- bool changed;
-
- spin_lock_irq(&sess->state_lock);
- changed = __rtrs_srv_change_state(sess, new_state);
spin_unlock_irq(&sess->state_lock);
return changed;
@@ -137,7 +111,6 @@ static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
struct rtrs_srv *srv = sess->srv;
int i;
- WARN_ON(atomic_read(&sess->ids_inflight));
if (sess->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
free_id(sess->ops_ids[i]);
@@ -152,11 +125,19 @@ static struct ib_cqe io_comp_cqe = {
.done = rtrs_srv_rdma_done
};
+static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
+{
+ struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref);
+
+ percpu_ref_exit(&sess->ids_inflight_ref);
+ complete(&sess->complete_done);
+}
+
static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
{
struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_op *id;
- int i;
+ int i, ret;
sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
GFP_KERNEL);
@@ -170,8 +151,14 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
sess->ops_ids[i] = id;
}
- init_waitqueue_head(&sess->ids_waitq);
- atomic_set(&sess->ids_inflight, 0);
+
+ ret = percpu_ref_init(&sess->ids_inflight_ref,
+ rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("Percpu reference init failed\n");
+ goto err;
+ }
+ init_completion(&sess->complete_done);
return 0;
@@ -182,21 +169,14 @@ err:
static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
{
- atomic_inc(&sess->ids_inflight);
+ percpu_ref_get(&sess->ids_inflight_ref);
}
static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
{
- if (atomic_dec_and_test(&sess->ids_inflight))
- wake_up(&sess->ids_waitq);
+ percpu_ref_put(&sess->ids_inflight_ref);
}
-static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
-{
- wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
-}
-
-
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
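
The conversion above is the standard percpu_ref lifecycle: gets and puts stay per-CPU in the I/O fast path, percpu_ref_kill() collapses the counter to a single atomic and refuses new references, and the release callback runs once the count reaches zero. A minimal sketch, assuming the <linux/percpu-refcount.h> API as used here:

    percpu_ref_init(&sess->ids_inflight_ref, rtrs_srv_inflight_ref_release,
                    0, GFP_KERNEL);
    percpu_ref_get(&sess->ids_inflight_ref);    /* per request, lockless    */
    percpu_ref_put(&sess->ids_inflight_ref);
    /* ... at teardown: */
    percpu_ref_kill(&sess->ids_inflight_ref);   /* drain: no new refs       */
    wait_for_completion(&sess->complete_done);  /* release callback exits   */
                                                /* the ref and completes us */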
@@ -773,7 +753,40 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
}
+static bool exist_sessname(struct rtrs_srv_ctx *ctx,
+ const char *sessname, const uuid_t *path_uuid)
+{
+ struct rtrs_srv *srv;
+ struct rtrs_srv_sess *sess;
+ bool found = false;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ mutex_lock(&srv->paths_mutex);
+
+ /* the same client (same uuid) may add another path with the same sessname, so skip it */
+ if (uuid_equal(&srv->paths_uuid, path_uuid)) {
+ mutex_unlock(&srv->paths_mutex);
+ continue;
+ }
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (strlen(sess->s.sessname) == strlen(sessname) &&
+ !strcmp(sess->s.sessname, sessname)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&srv->paths_mutex);
+ if (found)
+ break;
+ }
+ mutex_unlock(&ctx->srv_mutex);
+ return found;
+}
+
static int post_recv_sess(struct rtrs_srv_sess *sess);
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
static int process_info_req(struct rtrs_srv_con *con,
struct rtrs_msg_info_req *msg)
@@ -792,10 +805,17 @@ static int process_info_req(struct rtrs_srv_con *con,
rtrs_err(s, "post_recv_sess(), err: %d\n", err);
return err;
}
+
+ if (exist_sessname(sess->srv->ctx,
+ msg->sessname, &sess->srv->paths_uuid)) {
+ rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
+ return -EPERM;
+ }
+ strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+
rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
if (unlikely(!rwr))
return -ENOMEM;
- strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
tx_sz = sizeof(*rsp);
tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
@@ -1276,7 +1296,7 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
list_for_each_entry(sess, &srv->paths_list, s.entry) {
if (sess->state != RTRS_SRV_CONNECTED)
continue;
- strlcpy(sessname, sess->s.sessname,
+ strscpy(sessname, sess->s.sessname,
min_t(size_t, sizeof(sess->s.sessname), len));
err = 0;
break;
@@ -1288,7 +1308,7 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
EXPORT_SYMBOL(rtrs_srv_get_sess_name);
/**
- * rtrs_srv_get_sess_qdepth() - Get rtrs_srv qdepth.
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
* @srv: Session
*/
int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
@@ -1356,8 +1376,10 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
* If this request is not the first connection request from the
* client for this session then fail and return error.
*/
- if (!first_conn)
+ if (!first_conn) {
+ pr_err_ratelimited("Error: Not the first connection request for this session\n");
return ERR_PTR(-ENXIO);
+ }
/* need to allocate a new srv */
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
@@ -1481,6 +1503,7 @@ static void free_sess(struct rtrs_srv_sess *sess)
kobject_del(&sess->kobj);
kobject_put(&sess->kobj);
} else {
+ kfree(sess->stats);
kfree(sess);
}
}
@@ -1503,8 +1526,15 @@ static void rtrs_srv_close_work(struct work_struct *work)
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
- /* Wait for all inflights */
- rtrs_srv_wait_ops_ids(sess);
+
+ /*
+ * Degrade ref count to the usual model with a single shared
+ * atomic_t counter
+ */
+ percpu_ref_kill(&sess->ids_inflight_ref);
+
+ /* Wait for all completions */
+ wait_for_completion(&sess->complete_done);
/* Notify upper layer if we are the last path */
rtrs_srv_sess_down(sess);
@@ -1604,7 +1634,7 @@ static int create_con(struct rtrs_srv_sess *sess,
struct rtrs_sess *s = &sess->s;
struct rtrs_srv_con *con;
- u32 cq_size, wr_queue_size;
+ u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
int err, cq_vector;
con = kzalloc(sizeof(*con), GFP_KERNEL);
@@ -1619,36 +1649,42 @@ static int create_con(struct rtrs_srv_sess *sess,
con->c.sess = &sess->s;
con->c.cid = cid;
atomic_set(&con->wr_cnt, 1);
+ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
/*
* All receive and all send (each requiring invalidate)
* + 2 for drain and heartbeat
*/
- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
- cq_size = wr_queue_size;
+ max_send_wr = min_t(int, wr_limit,
+ SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
} else {
+ /* when always_invalidate is enabled, we need linv+rinv+mr+imm */
+ if (always_invalidate)
+ max_send_wr =
+ min_t(int, wr_limit,
+ srv->queue_depth * (1 + 4) + 1);
+ else
+ max_send_wr =
+ min_t(int, wr_limit,
+ srv->queue_depth * (1 + 2) + 1);
+
+ max_recv_wr = srv->queue_depth + 1;
/*
* If we have all receive requests posted and
* all write requests posted and each read request
* requires an invalidate request + drain
* and qp gets into error state.
*/
- cq_size = srv->queue_depth * 3 + 1;
- /*
- * In theory we might have queue_depth * 32
- * outstanding requests if an unsafe global key is used
- * and we have queue_depth read requests each consisting
- * of 32 different addresses. div 3 for mlx5.
- */
- wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
}
- atomic_set(&con->sq_wr_avail, wr_queue_size);
+ cq_num = max_send_wr + max_recv_wr;
+ atomic_set(&con->sq_wr_avail, max_send_wr);
cq_vector = rtrs_srv_get_next_cq_vector(sess);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
- err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
- wr_queue_size, wr_queue_size,
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num,
+ max_send_wr, max_recv_wr,
IB_POLL_WORKQUEUE);
if (err) {
rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
@@ -1728,7 +1764,7 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
path.src = &sess->s.src_addr;
path.dst = &sess->s.dst_addr;
rtrs_addr_to_str(&path, str, sizeof(str));
- strlcpy(sess->s.sessname, str, sizeof(sess->s.sessname));
+ strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
sess->s.con_num = con_num;
sess->s.recon_cnt = recon_cnt;
@@ -1780,38 +1816,39 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
u16 version, con_num, cid;
u16 recon_cnt;
- int err;
+ int err = -ECONNRESET;
if (len < sizeof(*msg)) {
pr_err("Invalid RTRS connection request\n");
- goto reject_w_econnreset;
+ goto reject_w_err;
}
if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
pr_err("Invalid RTRS magic\n");
- goto reject_w_econnreset;
+ goto reject_w_err;
}
version = le16_to_cpu(msg->version);
if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
pr_err("Unsupported major RTRS version: %d, expected %d\n",
version >> 8, RTRS_PROTO_VER_MAJOR);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
con_num = le16_to_cpu(msg->cid_num);
if (con_num > 4096) {
/* Sanity check */
pr_err("Too many connections requested: %d\n", con_num);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
cid = le16_to_cpu(msg->cid);
if (cid >= con_num) {
/* Sanity check */
pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
recon_cnt = le16_to_cpu(msg->recon_cnt);
srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
if (IS_ERR(srv)) {
err = PTR_ERR(srv);
+ pr_err("get_or_create_srv(), error %d\n", err);
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
@@ -1826,7 +1863,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
rtrs_err(s, "Session in wrong state: %s\n",
rtrs_srv_state_str(sess->state));
mutex_unlock(&srv->paths_mutex);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
/*
* Sanity checks
@@ -1835,13 +1872,13 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
rtrs_err(s, "Incorrect request: %d, %d\n",
cid, con_num);
mutex_unlock(&srv->paths_mutex);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
if (s->con[cid]) {
rtrs_err(s, "Connection already exists: %d\n",
cid);
mutex_unlock(&srv->paths_mutex);
- goto reject_w_econnreset;
+ goto reject_w_err;
}
} else {
sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
@@ -1850,11 +1887,13 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
mutex_unlock(&srv->paths_mutex);
put_srv(srv);
err = PTR_ERR(sess);
+ pr_err("RTRS server session allocation failed: %d\n", err);
goto reject_w_err;
}
}
err = create_con(sess, cm_id, cid);
if (err) {
+ rtrs_err((&sess->s), "create_con(), error %d\n", err);
(void)rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
@@ -1865,6 +1904,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
}
err = rtrs_rdma_do_accept(sess, cm_id);
if (err) {
+ rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err);
(void)rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
@@ -1882,9 +1922,6 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
reject_w_err:
return rtrs_rdma_do_reject(cm_id, err);
-reject_w_econnreset:
- return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
-
close_and_return_err:
mutex_unlock(&srv->paths_mutex);
close_sess(sess);
@@ -2177,9 +2214,9 @@ static int check_module_params(void)
sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
return -EINVAL;
}
- if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
- max_chunk_size, 4096);
+ max_chunk_size, MIN_CHUNK_SIZE);
return -EINVAL;
}
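
A worked example of the send-queue sizing introduced in create_con() above: for an I/O connection with, say, srv->queue_depth = 512, always_invalidate needs up to four WRs per request (local inv + remote inv + MR reg + immediate, per the comment) plus the response send, so max_send_wr = min(max_qp_wr, 512 * (1 + 4) + 1) = min(max_qp_wr, 2561); without always_invalidate the factor drops to (1 + 2), giving 1537, and max_recv_wr stays at queue_depth + 1 = 513.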
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 9543ae19996c..f8da2e3f0bda 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -81,8 +81,8 @@ struct rtrs_srv_sess {
spinlock_t state_lock;
int cur_cq_vector;
struct rtrs_srv_op **ops_ids;
- atomic_t ids_inflight;
- wait_queue_head_t ids_waitq;
+ struct percpu_ref ids_inflight_ref;
+ struct completion complete_done;
struct rtrs_srv_mr *mrs;
unsigned int mrs_num;
dma_addr_t *dma_addr;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index a7847282a2eb..61919ebd92b2 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -18,7 +18,7 @@
MODULE_DESCRIPTION("RDMA Transport Core");
MODULE_LICENSE("GPL");
-struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
struct ib_device *dma_dev,
enum dma_data_direction dir,
void (*done)(struct ib_cq *cq, struct ib_wc *wc))
@@ -26,10 +26,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
struct rtrs_iu *ius, *iu;
int i;
- ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+ ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
if (!ius)
return NULL;
- for (i = 0; i < queue_size; i++) {
+ for (i = 0; i < iu_num; i++) {
iu = &ius[i];
iu->direction = dir;
iu->buf = kzalloc(size, gfp_mask);
@@ -50,7 +50,7 @@ err:
}
EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
-void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
+void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
{
struct rtrs_iu *iu;
int i;
@@ -58,7 +58,7 @@ void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
if (!ius)
return;
- for (i = 0; i < queue_size; i++) {
+ for (i = 0; i < queue_num; i++) {
iu = &ius[i];
ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
kfree(iu->buf);
@@ -105,18 +105,21 @@ int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
- struct ib_send_wr *wr)
+ struct ib_send_wr *wr, struct ib_send_wr *tail)
{
if (head) {
- struct ib_send_wr *tail = head;
+ struct ib_send_wr *next = head;
- while (tail->next)
- tail = tail->next;
- tail->next = wr;
+ while (next->next)
+ next = next->next;
+ next->next = wr;
} else {
head = wr;
}
+ if (tail)
+ wr->next = tail;
+
return ib_post_send(qp, head, NULL);
}
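
With the new tail argument, a single ib_post_send() can now bracket a write between registration and invalidation, which is how the client write path uses it. An illustrative chain — the SGE, rkey, and CQE setup a real caller needs is elided, so this is a shape sketch rather than complete setup code:

    struct ib_reg_wr rwr     = { .wr.opcode = IB_WR_REG_MR };             /* head */
    struct ib_rdma_wr data   = { .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM };
    struct ib_send_wr inv_wr = { .opcode = IB_WR_LOCAL_INV };             /* tail */

    rwr.wr.next  = &data.wr;
    data.wr.next = &inv_wr;
    err = ib_post_send(qp, &rwr.wr, NULL);  /* register, write+imm, invalidate */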
@@ -142,15 +145,16 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
.send_flags = IB_SEND_SIGNALED,
};
- return rtrs_post_send(con->qp, head, &wr);
+ return rtrs_post_send(con->qp, head, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
- struct ib_sge *sge, unsigned int num_sge,
- u32 rkey, u64 rdma_addr, u32 imm_data,
- enum ib_send_flags flags,
- struct ib_send_wr *head)
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head,
+ struct ib_send_wr *tail)
{
struct ib_rdma_wr wr;
int i;
@@ -174,7 +178,7 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
if (WARN_ON(sge[i].length == 0))
return -EINVAL;
- return rtrs_post_send(con->qp, head, &wr.wr);
+ return rtrs_post_send(con->qp, head, &wr.wr, tail);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
@@ -191,7 +195,7 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
.wr.ex.imm_data = cpu_to_be32(imm_data),
};
- return rtrs_post_send(con->qp, head, &wr.wr);
+ return rtrs_post_send(con->qp, head, &wr.wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
@@ -212,20 +216,20 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
}
}
-static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
enum ib_poll_context poll_ctx)
{
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
- cq = ib_cq_pool_get(cm_id->device, cq_size, cq_vector, poll_ctx);
+ cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
con->cq = cq;
- con->cq_size = cq_size;
+ con->nr_cqe = nr_cqe;
return 0;
}
@@ -260,20 +264,20 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
}
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, int cq_size,
+ u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx)
{
int err;
- err = create_cq(con, cq_vector, cq_size, poll_ctx);
+ err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
if (err)
return err;
err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
- ib_cq_pool_put(con->cq, con->cq_size);
+ ib_cq_pool_put(con->cq, con->nr_cqe);
con->cq = NULL;
return err;
}
@@ -290,7 +294,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
con->qp = NULL;
}
if (con->cq) {
- ib_cq_pool_put(con->cq, con->cq_size);
+ ib_cq_pool_put(con->cq, con->nr_cqe);
con->cq = NULL;
}
}
@@ -376,7 +380,6 @@ void rtrs_stop_hb(struct rtrs_sess *sess)
{
cancel_delayed_work_sync(&sess->hb_dwork);
sess->hb_missed_cnt = 0;
- sess->hb_missed_max = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index dc3e1af1a85b..859c79685daf 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -57,7 +57,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
const struct rtrs_addr *paths,
size_t path_cnt, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
- u16 max_segments,
s16 max_reconnect_attempts, u32 nr_poll_queues);
void rtrs_clt_close(struct rtrs_clt *sess);
@@ -110,6 +109,7 @@ int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
struct rtrs_attrs {
u32 queue_depth;
u32 max_io_size;
+ u32 max_segments;
};
int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31f8aa2c40ed..6ba48a09eac4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -965,68 +965,52 @@ static void srp_disconnect_target(struct srp_target_port *target)
}
}
-static void srp_free_req_data(struct srp_target_port *target,
- struct srp_rdma_ch *ch)
+static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
+ struct srp_target_port *target = host_to_target(shost);
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_device *ibdev = dev->dev;
- struct srp_request *req;
- int i;
-
- if (!ch->req_ring)
- return;
+ struct srp_request *req = scsi_cmd_priv(cmd);
- for (i = 0; i < target->req_ring_size; ++i) {
- req = &ch->req_ring[i];
- if (dev->use_fast_reg)
- kfree(req->fr_list);
- if (req->indirect_dma_addr) {
- ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
- target->indirect_size,
- DMA_TO_DEVICE);
- }
- kfree(req->indirect_desc);
+ kfree(req->fr_list);
+ if (req->indirect_dma_addr) {
+ ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
+ target->indirect_size,
+ DMA_TO_DEVICE);
}
+ kfree(req->indirect_desc);
- kfree(ch->req_ring);
- ch->req_ring = NULL;
+ return 0;
}
-static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
- struct srp_target_port *target = ch->target;
+ struct srp_target_port *target = host_to_target(shost);
struct srp_device *srp_dev = target->srp_host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
- struct srp_request *req;
- void *mr_list;
+ struct srp_request *req = scsi_cmd_priv(cmd);
dma_addr_t dma_addr;
- int i, ret = -ENOMEM;
-
- ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
- GFP_KERNEL);
- if (!ch->req_ring)
- goto out;
+ int ret = -ENOMEM;
- for (i = 0; i < target->req_ring_size; ++i) {
- req = &ch->req_ring[i];
- mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
+ if (srp_dev->use_fast_reg) {
+ req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
GFP_KERNEL);
- if (!mr_list)
- goto out;
- if (srp_dev->use_fast_reg)
- req->fr_list = mr_list;
- req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
- if (!req->indirect_desc)
- goto out;
-
- dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
- target->indirect_size,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(ibdev, dma_addr))
+ if (!req->fr_list)
goto out;
+ }
+ req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ if (!req->indirect_desc)
+ goto out;
- req->indirect_dma_addr = dma_addr;
+ dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, dma_addr)) {
+ srp_exit_cmd_priv(shost, cmd);
+ goto out;
}
+
+ req->indirect_dma_addr = dma_addr;
ret = 0;
out:
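
The conversion above hands per-command state to the SCSI midlayer: the host template declares how much private space each scsi_cmnd carries, and the init/exit hooks run when each command's backing request is allocated and freed. A hedged sketch of the contract (cmd_size is set elsewhere in the driver; the accessor is from <scsi/scsi_cmnd.h>):

    static struct scsi_host_template srp_template_sketch = {
            .cmd_size      = sizeof(struct srp_request), /* private area size */
            .init_cmd_priv = srp_init_cmd_priv,
            .exit_cmd_priv = srp_exit_cmd_priv,
    };

    /* the private area sits directly behind the scsi_cmnd: */
    struct srp_request *req = scsi_cmd_priv(scmnd);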
@@ -1068,10 +1052,6 @@ static void srp_remove_target(struct srp_target_port *target)
}
cancel_work_sync(&target->tl_err_work);
srp_rport_put(target->rport);
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- srp_free_req_data(target, ch);
- }
kfree(target->ch);
target->ch = NULL;
@@ -1290,22 +1270,32 @@ static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
}
}
-static void srp_terminate_io(struct srp_rport *rport)
+struct srp_terminate_context {
+ struct srp_target_port *srp_target;
+ int scsi_result;
+};
+
+static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
+ bool reserved)
{
- struct srp_target_port *target = rport->lld_data;
- struct srp_rdma_ch *ch;
- int i, j;
+ struct srp_terminate_context *context = context_ptr;
+ struct srp_target_port *target = context->srp_target;
+ u32 tag = blk_mq_unique_tag(scmnd->request);
+ struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+ struct srp_request *req = scsi_cmd_priv(scmnd);
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
+ srp_finish_req(ch, req, NULL, context->scsi_result);
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
+ return true;
+}
- srp_finish_req(ch, req, NULL,
- DID_TRANSPORT_FAILFAST << 16);
- }
- }
+static void srp_terminate_io(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+ struct srp_terminate_context context = { .srp_target = target,
+ .scsi_result = DID_TRANSPORT_FAILFAST << 16 };
+
+ scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}
/* Calculate maximum initiator to target information unit length. */
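
srp_terminate_cmd() above is a scsi_host_busy_iter() callback: the midlayer walks only commands currently owned by the driver, which is what lets the private req_ring go away. The signatures involved, as they appear in <scsi/scsi_host.h> for this kernel:

    typedef bool (*scsi_host_busy_iter_fn)(struct scsi_cmnd *, void *,
                                           bool reserved);
    void scsi_host_busy_iter(struct Scsi_Host *shost,
                             scsi_host_busy_iter_fn fn, void *priv);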
@@ -1361,13 +1351,12 @@ static int srp_rport_reconnect(struct srp_rport *rport)
ch = &target->ch[i];
ret += srp_new_cm_id(ch);
}
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
+ {
+ struct srp_terminate_context context = {
+ .srp_target = target, .scsi_result = DID_RESET << 16};
- srp_finish_req(ch, req, NULL, DID_RESET << 16);
- }
+ scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
+ &context);
}
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
@@ -1963,13 +1952,10 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
spin_unlock_irqrestore(&ch->lock, flags);
} else {
scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
- if (scmnd && scmnd->host_scribble) {
- req = (void *)scmnd->host_scribble;
+ if (scmnd) {
+ req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
} else {
- scmnd = NULL;
- }
- if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
@@ -2001,7 +1987,6 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
- scmnd->host_scribble = NULL;
scmnd->scsi_done(scmnd);
}
}
@@ -2169,13 +2154,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
struct srp_rdma_ch *ch;
- struct srp_request *req;
+ struct srp_request *req = scsi_cmd_priv(scmnd);
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
u32 tag;
- u16 idx;
int len, ret;
scmnd->result = srp_chkready(target->rport);
@@ -2185,10 +2169,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
WARN_ON_ONCE(scmnd->request->tag < 0);
tag = blk_mq_unique_tag(scmnd->request);
ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
- idx = blk_mq_unique_tag_to_tag(tag);
- WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
- dev_name(&shost->shost_gendev), tag, idx,
- target->req_ring_size);
spin_lock_irqsave(&ch->lock, flags);
iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
@@ -2197,13 +2177,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
if (!iu)
goto err;
- req = &ch->req_ring[idx];
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
DMA_TO_DEVICE);
- scmnd->host_scribble = (void *) req;
-
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
@@ -2891,7 +2868,7 @@ static int srp_slave_configure(struct scsi_device *sdev)
return 0;
}
-static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
+static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2899,7 +2876,9 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}
-static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR_RO(id_ext);
+
+static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2907,7 +2886,9 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}
-static ssize_t show_service_id(struct device *dev,
+static DEVICE_ATTR_RO(ioc_guid);
+
+static ssize_t service_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2918,7 +2899,9 @@ static ssize_t show_service_id(struct device *dev,
be64_to_cpu(target->ib_cm.service_id));
}
-static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR_RO(service_id);
+
+static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2929,7 +2912,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
-static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR_RO(pkey);
+
+static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2937,7 +2922,9 @@ static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
}
-static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR_RO(sgid);
+
+static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2949,8 +2936,10 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
-static ssize_t show_orig_dgid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static DEVICE_ATTR_RO(dgid);
+
+static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2960,8 +2949,10 @@ static ssize_t show_orig_dgid(struct device *dev,
return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
-static ssize_t show_req_lim(struct device *dev,
- struct device_attribute *attr, char *buf)
+static DEVICE_ATTR_RO(orig_dgid);
+
+static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch;
@@ -2975,7 +2966,9 @@ static ssize_t show_req_lim(struct device *dev,
return sysfs_emit(buf, "%d\n", req_lim);
}
-static ssize_t show_zero_req_lim(struct device *dev,
+static DEVICE_ATTR_RO(req_lim);
+
+static ssize_t zero_req_lim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2983,7 +2976,9 @@ static ssize_t show_zero_req_lim(struct device *dev,
return sysfs_emit(buf, "%d\n", target->zero_req_lim);
}
-static ssize_t show_local_ib_port(struct device *dev,
+static DEVICE_ATTR_RO(zero_req_lim);
+
+static ssize_t local_ib_port_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -2991,7 +2986,9 @@ static ssize_t show_local_ib_port(struct device *dev,
return sysfs_emit(buf, "%d\n", target->srp_host->port);
}
-static ssize_t show_local_ib_device(struct device *dev,
+static DEVICE_ATTR_RO(local_ib_port);
+
+static ssize_t local_ib_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3000,7 +2997,9 @@ static ssize_t show_local_ib_device(struct device *dev,
dev_name(&target->srp_host->srp_dev->dev->dev));
}
-static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR_RO(local_ib_device);
+
+static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3008,7 +3007,9 @@ static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%d\n", target->ch_count);
}
-static ssize_t show_comp_vector(struct device *dev,
+static DEVICE_ATTR_RO(ch_count);
+
+static ssize_t comp_vector_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3016,7 +3017,9 @@ static ssize_t show_comp_vector(struct device *dev,
return sysfs_emit(buf, "%d\n", target->comp_vector);
}
-static ssize_t show_tl_retry_count(struct device *dev,
+static DEVICE_ATTR_RO(comp_vector);
+
+static ssize_t tl_retry_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3024,7 +3027,9 @@ static ssize_t show_tl_retry_count(struct device *dev,
return sysfs_emit(buf, "%d\n", target->tl_retry_count);
}
-static ssize_t show_cmd_sg_entries(struct device *dev,
+static DEVICE_ATTR_RO(tl_retry_count);
+
+static ssize_t cmd_sg_entries_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3032,7 +3037,9 @@ static ssize_t show_cmd_sg_entries(struct device *dev,
return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
}
-static ssize_t show_allow_ext_sg(struct device *dev,
+static DEVICE_ATTR_RO(cmd_sg_entries);
+
+static ssize_t allow_ext_sg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
@@ -3040,22 +3047,7 @@ static ssize_t show_allow_ext_sg(struct device *dev,
return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
-static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
-static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
-static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
-static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
-static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
-static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
-static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
-static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
-static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
-static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
-static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
-static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
-static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
-static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
-static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
-static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
+static DEVICE_ATTR_RO(allow_ext_sg);
static struct device_attribute *srp_host_attrs[] = {
&dev_attr_id_ext,
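
The rename pattern above is what makes this conversion possible: DEVICE_ATTR_RO() and DEVICE_ATTR_WO() construct the method names by token pasting, so each show_foo() helper must first become foo_show(). As a rough sketch -- paraphrasing include/linux/device.h and include/linux/sysfs.h, whose exact definitions vary by kernel version -- the helpers expand to:

    /* DEVICE_ATTR_RO(foo) declares a read-only (0444, i.e. S_IRUGO)
     * attribute named dev_attr_foo whose ->show method is foo_show().
     */
    #define DEVICE_ATTR_RO(_name) \
            struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

    #define __ATTR_RO(_name) {                                      \
            .attr = { .name = __stringify(_name), .mode = 0444 },   \
            .show = _name##_show,                                   \
    }

    /* DEVICE_ATTR_WO(_name) is the write-only (0200, S_IWUSR)
     * counterpart and expects a _name##_store() method instead,
     * which is why srp_create_target() becomes add_target_store()
     * further down.
     */

This is why the sixteen DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL) lines collapse into one-line DEVICE_ATTR_RO(foo) declarations once the helpers are renamed.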
@@ -3084,6 +3076,8 @@ static struct scsi_host_template srp_template = {
.target_alloc = srp_target_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
+ .init_cmd_priv = srp_init_cmd_priv,
+ .exit_cmd_priv = srp_exit_cmd_priv,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
.eh_timed_out = srp_timed_out,
@@ -3097,6 +3091,7 @@ static struct scsi_host_template srp_template = {
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.shost_attrs = srp_host_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct srp_request),
};
static int srp_sdev_count(struct Scsi_Host *host)
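
The .init_cmd_priv/.exit_cmd_priv hooks and the new .cmd_size field move ownership of struct srp_request from the driver to the SCSI midlayer: the midlayer now allocates .cmd_size bytes of private data behind every struct scsi_cmnd, which is why the hunks below drop the srp_alloc_req_data()/srp_free_req_data() calls and why ib_srp.h loses its req_ring pointer and req_ring_size field. A minimal sketch of the mechanism, with illustrative names and bodies rather than the driver's real code:

    #include <linux/string.h>       /* memset() */
    #include <scsi/scsi_cmnd.h>     /* scsi_cmd_priv() */
    #include <scsi/scsi_host.h>
    /* struct srp_request itself comes from the driver's ib_srp.h */

    /* Called by the midlayer once per command slot at host setup;
     * scsi_cmd_priv() returns the .cmd_size bytes that follow the
     * scsi_cmnd.  The real srp_init_cmd_priv() also sets up memory
     * registration state here.
     */
    static int example_init_cmd_priv(struct Scsi_Host *shost,
                                     struct scsi_cmnd *cmd)
    {
            struct srp_request *req = scsi_cmd_priv(cmd);

            memset(req, 0, sizeof(*req));
            return 0;
    }

    /* In the I/O path the same memory is reached the same way,
     * replacing the old lookup in ch->req_ring.
     */
    static int example_queuecommand(struct Scsi_Host *shost,
                                    struct scsi_cmnd *scmnd)
    {
            struct srp_request *req = scsi_cmd_priv(scmnd);

            (void)req;      /* build and post the SRP_CMD IU here */
            return 0;
    }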
@@ -3617,9 +3612,9 @@ out:
return ret;
}
-static ssize_t srp_create_target(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t add_target_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct srp_host *host =
container_of(dev, struct srp_host, dev);
@@ -3676,8 +3671,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto out;
- target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
-
if (!srp_conn_unique(target->srp_host, target)) {
if (target->using_rdma_cm) {
shost_printk(KERN_INFO, target->scsi_host,
@@ -3780,10 +3773,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_disconnect;
- ret = srp_alloc_req_data(ch);
- if (ret)
- goto err_disconnect;
-
ret = srp_connect_ch(ch, max_iu_len, multich);
if (ret) {
char dst[64];
@@ -3802,7 +3791,6 @@ static ssize_t srp_create_target(struct device *dev,
goto free_ch;
} else {
srp_free_ch_ib(target, ch);
- srp_free_req_data(target, ch);
target->ch_count = ch - target->ch;
goto connected;
}
@@ -3863,16 +3851,15 @@ free_ch:
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
- srp_free_req_data(target, ch);
}
kfree(target->ch);
goto out;
}
-static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
+static DEVICE_ATTR_WO(add_target);
-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
@@ -3880,9 +3867,9 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_port(struct device *dev, struct device_attribute *attr,
+static ssize_t port_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
@@ -3890,7 +3877,7 @@ static ssize_t show_port(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%d\n", host->port);
}
-static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
+static DEVICE_ATTR_RO(port);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
@@ -4078,10 +4065,13 @@ static int __init srp_init_module(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
+ BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
- BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
+ BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
if (srp_sg_tablesize) {
pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 6818cac0a3b7..abccddeea1e3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -174,7 +174,6 @@ struct srp_rdma_ch {
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
- struct srp_request *req_ring;
int comp_vector;
u64 tsk_mgmt_tag;
@@ -220,7 +219,6 @@ struct srp_target_port {
int mr_pool_size;
int mr_per_cmd;
int queue_size;
- int req_ring_size;
int comp_vector;
int tl_retry_count;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index ea447805d4ea..3cadf1295417 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2858,7 +2858,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
&ch->sq_wr_avail) < 0)) {
pr_warn("%s: IB send queue full (needed %d)\n",
__func__, ioctx->n_rdma);
- ret = -ENOMEM;
goto out;
}