path: root/drivers/infiniband/hw/nes/nes_cm.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 10:33:42 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 10:33:42 -0700
commit	0c2fe82a9b106f1c03719783134360586d718a69 (patch)
tree	ec1a18ca49f1e6724ef3a93727f5f73b7df61931	/drivers/infiniband/hw/nes/nes_cm.c
parent	5f0e685f316a1de6d3af8b23eaf46651faca32ab (diff)
parent	f0e88aeb19dac00ed2e09fd4d39ee65f32d5e968 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes for the 3.4 merge window from Roland Dreier:
 "Nothing big really stands out; by patch count lots of fixes to the mlx4
  driver plus some cleanups and fixes to the core and other drivers."

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (28 commits)
  mlx4_core: Scale size of MTT table with system RAM
  mlx4_core: Allow dynamic MTU configuration for IB ports
  IB/mlx4: Fix info returned when querying IBoE ports
  IB/mlx4: Fix possible missed completion event
  mlx4_core: Report thermal error events
  mlx4_core: Fix one more static exported function
  IB: Change CQE "csum_ok" field to a bit flag
  RDMA/iwcm: Reject connect requests if cmid is not in LISTEN state
  RDMA/cxgb3: Don't pass irq flags to flush_qp()
  mlx4_core: Get rid of redundant ext_port_cap flags
  RDMA/ucma: Fix AB-BA deadlock
  IB/ehca: Fix ilog2() compile failure
  IB: Use central enum for speed instead of hard-coded values
  IB/iser: Post initial receive buffers before sending the final login request
  IB/iser: Free IB connection resources in the proper place
  IB/srp: Consolidate repetitive sysfs code
  IB/srp: Use pr_fmt() and pr_err()/pr_warn()
  IB/core: Fix SDR rates in sysfs
  mlx4: Enforce device max FMR maps in FMR alloc
  IB/mlx4: Set bad_wr for invalid send opcode
  ...
Diffstat (limited to 'drivers/infiniband/hw/nes/nes_cm.c')
-rw-r--r--	drivers/infiniband/hw/nes/nes_cm.c	39
1 file changed, 22 insertions, 17 deletions
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c5e4cb2d3223..71edfbbcce1c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
case IETF_MPA_V2: {
u16 ird_size;
u16 ord_size;
+ u16 rtr_ctrl_ird;
+ u16 rtr_ctrl_ord;
+
mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
mpa_hdr_len += IETF_RTR_MSG_SIZE;
cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
rtr_msg = &mpa_v2_frame->rtr_msg;
/* parse rtr message */
- rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
- rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
- ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
- ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+ rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
- if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+ if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
/* send reset */
return -EINVAL;
}
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
}
}
- if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+ if (rtr_ctrl_ord & IETF_RDMA0_READ) {
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
- } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+ } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
} else { /* Not supported RDMA0 operation */
return -EINVAL;
@@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
{
struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird;
+ u16 ctrl_ord;
/* initialize the upper 5 bytes of the frame */
build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -550,31 +555,31 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
/* initialize RTR msg */
- rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
IETF_NO_IRD_ORD : cm_node->ird_size;
- rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
IETF_NO_IRD_ORD : cm_node->ord_size;
- rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
- rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+ ctrl_ird |= IETF_PEER_TO_PEER;
+ ctrl_ird |= IETF_FLPDU_ZERO_LEN;
switch (mpa_key) {
case MPA_KEY_REQUEST:
- rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
- rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
break;
case MPA_KEY_REPLY:
switch (cm_node->send_rdma0_op) {
case SEND_RDMA_WRITE_ZERO:
- rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_WRITE;
break;
case SEND_RDMA_READ_ZERO:
- rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ ctrl_ord |= IETF_RDMA0_READ;
break;
}
}
- rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
- rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
}
/**
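
The nes_cm.c change above replaces in-place byte swaps of the RTR message's ctrl_ird/ctrl_ord fields with host-order local variables: parse_mpa() now reads the fields into rtr_ctrl_ird/rtr_ctrl_ord with ntohs() and leaves the wire-format struct untouched, and build_mpa_v2() assembles ctrl_ird/ctrl_ord in host order and converts them exactly once with htons() when storing. Below is a minimal userspace sketch of the same pattern; the struct, constants, and function names (ex_rtr_msg, EX_*) are simplified stand-ins for illustration, not the nes driver's actual definitions.

/*
 * Minimal sketch of the host-order-locals pattern, using hypothetical
 * stand-in names (EX_*); not the nes driver's real structures.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohs()/htons(); kernel code uses be16_to_cpu()/cpu_to_be16() */

#define EX_PEER_TO_PEER 0x8000  /* hypothetical flag bit in ctrl_ird */
#define EX_RDMA0_WRITE  0x8000  /* hypothetical flag bit in ctrl_ord */
#define EX_NO_IRD_ORD   0x3fff  /* hypothetical mask for the IRD/ORD count */

struct ex_rtr_msg {
	uint16_t ctrl_ird;      /* stays in network byte order, as on the wire */
	uint16_t ctrl_ord;
};

/* Parse side: convert into host-order locals; the wire struct is never modified. */
static int ex_parse_rtr(const struct ex_rtr_msg *rtr, uint16_t *ird, uint16_t *ord)
{
	uint16_t ctrl_ird = ntohs(rtr->ctrl_ird);
	uint16_t ctrl_ord = ntohs(rtr->ctrl_ord);

	if (!(ctrl_ird & EX_PEER_TO_PEER))
		return -1;      /* reject, as parse_mpa() does when the P2P bit is missing */

	*ird = ctrl_ird & EX_NO_IRD_ORD;
	*ord = ctrl_ord & EX_NO_IRD_ORD;
	return 0;
}

/* Build side: assemble everything in host order, convert once when storing. */
static void ex_build_rtr(struct ex_rtr_msg *rtr, uint16_t ird, uint16_t ord)
{
	uint16_t ctrl_ird = (ird > EX_NO_IRD_ORD) ? EX_NO_IRD_ORD : ird;
	uint16_t ctrl_ord = (ord > EX_NO_IRD_ORD) ? EX_NO_IRD_ORD : ord;

	ctrl_ird |= EX_PEER_TO_PEER;
	ctrl_ord |= EX_RDMA0_WRITE;

	rtr->ctrl_ird = htons(ctrl_ird);        /* single conversion at the boundary */
	rtr->ctrl_ord = htons(ctrl_ord);
}

int main(void)
{
	struct ex_rtr_msg msg;
	uint16_t ird, ord;

	ex_build_rtr(&msg, 4, 0x7fff);
	if (ex_parse_rtr(&msg, &ird, &ord) == 0)
		printf("ird=%u ord=%u\n", ird, ord);
	return 0;
}

In kernel code, wire-format fields like these are typically annotated __be16, which lets sparse flag in-place conversions of the kind the removed lines performed; keeping a single conversion point at the parse/build boundary also avoids any chance of a double swap.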