path: root/include/rdma/ib_verbs.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 16:57:19 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 16:57:19 -0700
commit	877f075aac900288ce2e6a64075cceff09210a7e (patch)
tree	2bcf23e903403a2bfa3c6fac14d1334614e4d0f1 /include/rdma/ib_verbs.h
parent	3cf59142bcd391680beb6932838fe09a73947705 (diff)
parent	f7eaa7ed8fd46542275cf249cd934a366f6556bb (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "Main batch of InfiniBand/RDMA changes for 3.15:

  - The biggest change is core API extensions and mlx5 low-level driver
    support for handling DIF/DIX-style protection information, and the
    addition of PI support to the iSER initiator. Target support will
    be arriving shortly through the SCSI target tree.

  - A nice simplification to the "umem" memory pinning library now that
    we have chained sg lists. Kudos to Yishai Hadas for realizing our
    code didn't have to be so crazy.

  - Another nice simplification to the sg wrappers used by qib, ipath
    and ehca to handle their mapping of memory to adapter.

  - The usual batch of fixes to bugs found by static checkers etc. from
    intrepid people like Dan Carpenter and Yann Droneaud.

  - A large batch of cxgb4, ocrdma, qib driver updates"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (102 commits)
  RDMA/ocrdma: Unregister inet notifier when unloading ocrdma
  RDMA/ocrdma: Fix warnings about pointer <-> integer casts
  RDMA/ocrdma: Code clean-up
  RDMA/ocrdma: Display FW version
  RDMA/ocrdma: Query controller information
  RDMA/ocrdma: Support non-embedded mailbox commands
  RDMA/ocrdma: Handle CQ overrun error
  RDMA/ocrdma: Display proper value for max_mw
  RDMA/ocrdma: Use non-zero tag in SRQ posting
  RDMA/ocrdma: Memory leak fix in ocrdma_dereg_mr()
  RDMA/ocrdma: Increment abi version count
  RDMA/ocrdma: Update version string
  be2net: Add abi version between be2net and ocrdma
  RDMA/ocrdma: ABI versioning between ocrdma and be2net
  RDMA/ocrdma: Allow DPP QP creation
  RDMA/ocrdma: Read ASIC_ID register to select asic_gen
  RDMA/ocrdma: SQ and RQ doorbell offset clean up
  RDMA/ocrdma: EQ full catastrophe avoidance
  RDMA/cxgb4: Disable DSGL use by default
  RDMA/cxgb4: rx_data() needs to hold the ep mutex
  ...
Diffstat (limited to 'include/rdma/ib_verbs.h')
-rw-r--r--	include/rdma/ib_verbs.h	201
1 files changed, 192 insertions, 9 deletions
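
As context for the protection-information additions in this header, the sketch below shows roughly how a consumer might drive the new signature API: allocate a signature-enabled MR with ib_create_mr(), describe the memory and wire domains via ib_sig_attrs, and post an IB_WR_REG_SIG_MR work request on a signature-enabled QP. This is only an illustrative sketch and is not part of the commit: the variables pd, data_sge and prot_sge are assumed to exist, the T10-DIF parameters (512-byte PI interval, CRC guard, zero reference tag) are example values, and error handling is omitted.

	/* Illustrative only: allocate a signature-enabled MR (pd is assumed). */
	struct ib_mr_init_attr mr_attr = {
		.max_reg_descriptors = 2,
		.flags = IB_MR_SIGNATURE_EN,
	};
	struct ib_mr *sig_mr = ib_create_mr(pd, &mr_attr);

	/* Example T10-DIF type 1 layout for both the memory and wire domains. */
	struct ib_sig_attrs sig_attrs = {
		.check_mask = 0xff,		/* check all eight DIF bytes */
		.mem = {
			.sig_type = IB_SIG_TYPE_T10_DIF,
			.sig.dif = {
				.type = IB_T10DIF_TYPE1,
				.bg_type = IB_T10DIF_CRC,
				.pi_interval = 512,	/* example: one PI block per 512 bytes */
				.ref_tag = 0,		/* example initial reference tag */
			},
		},
		.wire = {
			.sig_type = IB_SIG_TYPE_T10_DIF,
			.sig.dif = {
				.type = IB_T10DIF_TYPE1,
				.bg_type = IB_T10DIF_CRC,
				.pi_interval = 512,
				.ref_tag = 0,
			},
		},
	};

	/* Post the signature handover registration on a signature-enabled QP. */
	struct ib_send_wr wr = {
		.opcode = IB_WR_REG_SIG_MR,
		.sg_list = &data_sge,		/* SGE covering the data buffer (assumed) */
		.num_sge = 1,
		.wr.sig_handover = {
			.sig_attrs = &sig_attrs,
			.sig_mr = sig_mr,
			.access_flags = IB_ACCESS_LOCAL_WRITE,
			.prot = &prot_sge,	/* SGE covering the PI buffer (assumed) */
		},
	};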
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6793f32ccb58..acd825182977 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -122,7 +122,19 @@ enum ib_device_cap_flags {
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
- IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29)
+ IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
+ IB_DEVICE_SIGNATURE_HANDOVER = (1<<30)
+};
+
+enum ib_signature_prot_cap {
+ IB_PROT_T10DIF_TYPE_1 = 1,
+ IB_PROT_T10DIF_TYPE_2 = 1 << 1,
+ IB_PROT_T10DIF_TYPE_3 = 1 << 2,
+};
+
+enum ib_signature_guard_cap {
+ IB_GUARD_T10DIF_CRC = 1,
+ IB_GUARD_T10DIF_CSUM = 1 << 1,
};
enum ib_atomic_cap {
@@ -172,6 +184,8 @@ struct ib_device_attr {
unsigned int max_fast_reg_page_list_len;
u16 max_pkeys;
u8 local_ca_ack_delay;
+ int sig_prot_cap;
+ int sig_guard_cap;
};
enum ib_mtu {
@@ -461,6 +475,130 @@ int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
*/
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
+enum ib_mr_create_flags {
+ IB_MR_SIGNATURE_EN = 1,
+};
+
+/**
+ * ib_mr_init_attr - Memory region init attributes passed to routine
+ * ib_create_mr.
+ * @max_reg_descriptors: max number of registration descriptors that
+ * may be used with registration work requests.
+ * @flags: MR creation flags bit mask.
+ */
+struct ib_mr_init_attr {
+ int max_reg_descriptors;
+ u32 flags;
+};
+
+enum ib_signature_type {
+ IB_SIG_TYPE_T10_DIF,
+};
+
+/**
+ * T10-DIF Signature types
+ * T10-DIF types are defined by SCSI
+ * specifications.
+ */
+enum ib_t10_dif_type {
+ IB_T10DIF_NONE,
+ IB_T10DIF_TYPE1,
+ IB_T10DIF_TYPE2,
+ IB_T10DIF_TYPE3
+};
+
+/**
+ * Signature T10-DIF block-guard types
+ * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
+ * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
+ */
+enum ib_t10_dif_bg_type {
+ IB_T10DIF_CRC,
+ IB_T10DIF_CSUM
+};
+
+/**
+ * struct ib_t10_dif_domain - Parameters specific for T10-DIF
+ * domain.
+ * @type: T10-DIF type (0|1|2|3)
+ * @bg_type: T10-DIF block guard type (CRC|CSUM)
+ * @pi_interval: protection information interval.
+ * @bg: seed of guard computation.
+ * @app_tag: application tag of guard block
+ * @ref_tag: initial guard block reference tag.
+ * @type3_inc_reftag: T10-DIF type 3 does not define
+ *     the reference tag; it is the user's choice
+ *     whether to increment it or not.
+ */
+struct ib_t10_dif_domain {
+ enum ib_t10_dif_type type;
+ enum ib_t10_dif_bg_type bg_type;
+ u16 pi_interval;
+ u16 bg;
+ u16 app_tag;
+ u32 ref_tag;
+ bool type3_inc_reftag;
+};
+
+/**
+ * struct ib_sig_domain - Parameters for signature domain
+ * @sig_type: specific signature type
+ * @sig: union of all signature domain attributes that may
+ * be used to set domain layout.
+ */
+struct ib_sig_domain {
+ enum ib_signature_type sig_type;
+ union {
+ struct ib_t10_dif_domain dif;
+ } sig;
+};
+
+/**
+ * struct ib_sig_attrs - Parameters for signature handover operation
+ * @check_mask: bitmask for signature byte check (8 bytes)
+ * @mem: memory domain layout descriptor.
+ * @wire: wire domain layout descriptor.
+ */
+struct ib_sig_attrs {
+ u8 check_mask;
+ struct ib_sig_domain mem;
+ struct ib_sig_domain wire;
+};
+
+enum ib_sig_err_type {
+ IB_SIG_BAD_GUARD,
+ IB_SIG_BAD_REFTAG,
+ IB_SIG_BAD_APPTAG,
+};
+
+/**
+ * struct ib_sig_err - signature error descriptor
+ */
+struct ib_sig_err {
+ enum ib_sig_err_type err_type;
+ u32 expected;
+ u32 actual;
+ u64 sig_err_offset;
+ u32 key;
+};
+
+enum ib_mr_status_check {
+ IB_MR_CHECK_SIG_STATUS = 1,
+};
+
+/**
+ * struct ib_mr_status - Memory region status container
+ *
+ * @fail_status: Bitmask of MR checks status. For each
+ * failed check a corresponding status bit is set.
+ * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
+ * failure.
+ */
+struct ib_mr_status {
+ u32 fail_status;
+ struct ib_sig_err sig_err;
+};
+
/**
* mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
* enum.
@@ -644,6 +782,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
IB_QP_CREATE_NETIF_QP = 1 << 5,
+ IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -808,6 +947,7 @@ enum ib_wr_opcode {
IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
IB_WR_BIND_MW,
+ IB_WR_REG_SIG_MR,
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer.
*/
@@ -913,6 +1053,12 @@ struct ib_send_wr {
u32 rkey;
struct ib_mw_bind_info bind_info;
} bind_mw;
+ struct {
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_mr *sig_mr;
+ int access_flags;
+ struct ib_sge *prot;
+ } sig_handover;
} wr;
u32 xrc_remote_srq_num; /* XRC TGT QPs only */
};
@@ -1266,10 +1412,6 @@ struct ib_dma_mapping_ops {
void (*unmap_sg)(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction);
- u64 (*dma_address)(struct ib_device *dev,
- struct scatterlist *sg);
- unsigned int (*dma_len)(struct ib_device *dev,
- struct scatterlist *sg);
void (*sync_single_for_cpu)(struct ib_device *dev,
u64 dma_handle,
size_t size,
@@ -1407,6 +1549,9 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr);
+ int (*destroy_mr)(struct ib_mr *mr);
+ struct ib_mr * (*create_mr)(struct ib_pd *pd,
+ struct ib_mr_init_attr *mr_init_attr);
struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
@@ -1455,6 +1600,8 @@ struct ib_device {
*flow_attr,
int domain);
int (*destroy_flow)(struct ib_flow *flow_id);
+ int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
+ struct ib_mr_status *mr_status);
struct ib_dma_mapping_ops *dma_ops;
@@ -2089,12 +2236,13 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
* @dev: The device for which the DMA addresses were created
* @sg: The scatter/gather entry
+ *
+ * Note: this function is obsolete. To do: change all occurrences of
+ * ib_sg_dma_address() into sg_dma_address().
*/
static inline u64 ib_sg_dma_address(struct ib_device *dev,
struct scatterlist *sg)
{
- if (dev->dma_ops)
- return dev->dma_ops->dma_address(dev, sg);
return sg_dma_address(sg);
}
@@ -2102,12 +2250,13 @@ static inline u64 ib_sg_dma_address(struct ib_device *dev,
* ib_sg_dma_len - Return the DMA length from a scatter/gather entry
* @dev: The device for which the DMA addresses were created
* @sg: The scatter/gather entry
+ *
+ * Note: this function is obsolete. To do: change all occurrences of
+ * ib_sg_dma_len() into sg_dma_len().
*/
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
struct scatterlist *sg)
{
- if (dev->dma_ops)
- return dev->dma_ops->dma_len(dev, sg);
return sg_dma_len(sg);
}
@@ -2250,6 +2399,25 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
*/
int ib_dereg_mr(struct ib_mr *mr);
+
+/**
+ * ib_create_mr - Allocates a memory region that may be used for
+ * signature handover operations.
+ * @pd: The protection domain associated with the region.
+ * @mr_init_attr: memory region init attributes.
+ */
+struct ib_mr *ib_create_mr(struct ib_pd *pd,
+ struct ib_mr_init_attr *mr_init_attr);
+
+/**
+ * ib_destroy_mr - Destroys a memory region that was created using
+ * ib_create_mr and removes it from HW translation tables.
+ * @mr: The memory region to destroy.
+ *
+ * This function can fail if the memory region has memory windows bound to it.
+ */
+int ib_destroy_mr(struct ib_mr *mr);
+
/**
* ib_alloc_fast_reg_mr - Allocates memory region usable with the
* IB_WR_FAST_REG_MR send work request.
@@ -2435,4 +2603,19 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
+/**
+ * ib_check_mr_status: lightweight check of MR status.
+ * This routine may provide status checks on a selected
+ * ib_mr. The first use is for signature status checks.
+ *
+ * @mr: A memory region.
+ * @check_mask: Bitmask of which checks to perform from
+ * ib_mr_status_check enumeration.
+ * @mr_status: The container of relevant status checks.
+ *     Failed checks are indicated in the status bitmask
+ *     and the relevant info is placed in the error item.
+ */
+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+ struct ib_mr_status *mr_status);
+
#endif /* IB_VERBS_H */
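
For reference, a minimal sketch (again, not part of the commit) of how the new ib_check_mr_status() entry point might be used once an I/O that went through a signature-enabled MR completes. sig_mr is the hypothetical MR from the sketch above, and error handling is simplified.

	/* Illustrative only: query signature status after the I/O completes. */
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)) {
		pr_err("signature error %d: expected 0x%x, actual 0x%x, offset %llu, key 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual,
		       (unsigned long long)mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.key);
	}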