| author | Joshua Hay <joshua.a.hay@intel.com> | 2023-08-07 17:34:12 -0700 |
|---|---|---|
| committer | Tony Nguyen <anthony.l.nguyen@intel.com> | 2023-09-13 14:59:24 -0700 |
| commit | c2d548cad1508d334517bcbd7cd5c915cc831fc0 (patch) | |
| tree | 062cc59d628a7674db66f1b4776c927e109fe795 /drivers/net/ethernet/intel/idpf/idpf.h | |
| parent | 6818c4d5b3c2e9dd5d7316792f88d371d07d0ebf (diff) | |
idpf: add TX splitq napi poll support
Add support to handle the interrupts for the TX completion queue and
process the various completion types.
In flow scheduling mode, the driver processes primarily buffer
completions, with an occasional descriptor completion. This mode
supports out of order TX completions. To do so, HW generates one buffer
completion per packet. Each of those completions contains the unique tag
provided during TX encoding, which is used to locate the packet either
on the TX buffer ring or in a hash table. The hash table is used to track
TX buffer information so the descriptor(s) for a given packet can be
reused while the driver is still waiting on the buffer completion(s).
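
To make the lookup concrete, here is a minimal userspace sketch of the
two-level tag lookup. All names and sizes in it (struct tx_buf, struct
tx_queue, find_completed_buf, RING_SIZE, HASH_BUCKETS) are invented for
illustration; it models the mechanism only and is not the driver's code:

```c
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE    256
#define HASH_BUCKETS  64

struct tx_buf {
	uint16_t tag;		/* completion tag assigned at send time */
	void *skb;		/* packet owning this buffer */
	struct tx_buf *next;	/* hash-chain link once stashed */
};

struct tx_queue {
	struct tx_buf ring[RING_SIZE];		/* buffers still on the ring */
	struct tx_buf *stash[HASH_BUCKETS];	/* buffers stashed out of order */
};

/* A buffer completion carries only the tag; find the packet it refers to. */
static struct tx_buf *find_completed_buf(struct tx_queue *txq, uint16_t tag)
{
	struct tx_buf *buf = &txq->ring[tag % RING_SIZE];

	if (buf->skb && buf->tag == tag)
		return buf;		/* fast path: still on the TX buffer ring */

	for (buf = txq->stash[tag % HASH_BUCKETS]; buf; buf = buf->next)
		if (buf->tag == tag)
			return buf;	/* slow path: previously stashed */

	return NULL;
}
```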
Packets end up in the hash table in one of two ways: 1) a packet was
stashed during descriptor completion cleaning, or 2) an out of order
buffer completion was processed. A descriptor completion arrives
only every so often and is primarily used to guarantee the TX descriptor
ring can be reused without having to wait on the individual buffer
completions. E.g. a descriptor completion for packet N+16 guarantees the HW
has read all of the descriptors for packets N through N+15, so all of the
buffers for packets N through N+15 are stashed into the hash table and the
descriptors can be reused for more TX packets. Similarly, a packet can be
stashed in the hash table because an out of order buffer completion was
processed. E.g. processing a buffer completion for packet N+3 implies that
the HW has read all of the descriptors for packets N through N+3, so those
descriptors can be reused. However, the HW has not necessarily finished the
DMA for packets N through N+2 yet, so their buffers cannot be freed; they
are stashed in the hash table instead.
In either case, the buffer completions will eventually be processed for
all of the stashed packets, and all of the buffers will be cleaned from
the hash table.
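
Continuing the model above, a sketch of the stash step under the same
assumptions (stash_buf and clean_descriptors are invented names; the real
driver's cleaning routines differ):

```c
#include <stdlib.h>

/* Copy a still-pending buffer into the hash table so its ring slot (and
 * descriptors) can be reused before the buffer completion arrives.
 */
static void stash_buf(struct tx_queue *txq, struct tx_buf *buf)
{
	struct tx_buf *entry = malloc(sizeof(*entry));
	unsigned int b = buf->tag % HASH_BUCKETS;

	if (!entry)
		return;			/* error handling elided in this model */

	*entry = *buf;			/* move ownership out of the ring */
	entry->next = txq->stash[b];
	txq->stash[b] = entry;
	buf->skb = NULL;		/* ring slot is now free for new packets */
}

/* A descriptor completion for index 'end' guarantees the HW has read every
 * descriptor before it, so all pending packets up to 'end' are stashed and
 * their descriptors handed back, without waiting on buffer completions.
 */
static void clean_descriptors(struct tx_queue *txq, uint16_t ntc, uint16_t end)
{
	while (ntc != end) {
		struct tx_buf *buf = &txq->ring[ntc];

		if (buf->skb)
			stash_buf(txq, buf);
		ntc = (ntc + 1) % RING_SIZE;
	}
}
```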
In queue-based scheduling mode, the driver processes primarily descriptor
completions and cleans the TX ring in the conventional way, as modeled below.
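
For contrast, the conventional in-order clean can be modeled with the same
invented types; free_skb stands in for the real buffer teardown:

```c
/* Queue-based mode: completions arrive in order, so cleaning is a plain
 * walk from next-to-clean up to the completed index, freeing as it goes.
 */
static void clean_ring_in_order(struct tx_queue *txq, uint16_t ntc,
				uint16_t done, void (*free_skb)(void *skb))
{
	while (ntc != done) {
		struct tx_buf *buf = &txq->ring[ntc];

		if (buf->skb) {
			free_skb(buf->skb);	/* buffer is done; free it now */
			buf->skb = NULL;
		}
		ntc = (ntc + 1) % RING_SIZE;
	}
}
```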
Finally, the driver triggers a TX queue drain after sending the disable
queues virtchnl message. When the HW completes the queue draining, it
sends the driver a queue marker packet completion. The driver determines
when all TX queues have been drained and proceeds with the disable flow.
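
The drain handshake amounts to marker bookkeeping: expect one marker
completion per TX queue, and block the disable flow until the last one
arrives. Below is a sketch using pthreads in place of the kernel's wait
queues; every name in it is hypothetical:

```c
#include <pthread.h>

/* One marker completion is expected per TX queue after the disable-queues
 * message; the disable flow blocks until the last one is seen.
 */
struct vport_drain {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int markers_pending;	/* initialized to the number of TX queues */
};

/* Called from the completion poll loop when a marker completion arrives. */
static void marker_received(struct vport_drain *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->markers_pending == 0)
		pthread_cond_broadcast(&d->done);	/* all queues drained */
	pthread_mutex_unlock(&d->lock);
}

/* Called from the disable flow after sending the virtchnl message. */
static void wait_for_drain(struct vport_drain *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->markers_pending > 0)
		pthread_cond_wait(&d->done, &d->lock);
	pthread_mutex_unlock(&d->lock);
}
```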
With this, the driver can send TX packets and clean up the resources
properly.
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Co-developed-by: Alan Brady <alan.brady@intel.com>
Signed-off-by: Alan Brady <alan.brady@intel.com>
Co-developed-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Madhu Chittim <madhu.chittim@intel.com>
Co-developed-by: Phani Burra <phani.r.burra@intel.com>
Signed-off-by: Phani Burra <phani.r.burra@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Co-developed-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf.h')
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf.h | 22
1 file changed, 22 insertions, 0 deletions
```diff
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 2aae1c6a1628..da62ed197fdb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -14,6 +14,7 @@ struct idpf_vport_max_q;
 #include <linux/etherdevice.h>
 #include <linux/pci.h>
 #include <linux/bitfield.h>
+#include <linux/dim.h>
 
 #include "virtchnl2.h"
 #include "idpf_lan_txrx.h"
@@ -41,6 +42,8 @@ struct idpf_vport_max_q;
 /* available message levels */
 #define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
+#define IDPF_DIM_PROFILE_SLOTS	5
+
 #define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
 #define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0
 
@@ -255,11 +258,23 @@ enum idpf_vport_vc_state {
 extern const char * const idpf_vport_vc_state_str[];
 
 /**
+ * enum idpf_vport_flags - Vport flags
+ * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
+ *			  processing is done
+ * @IDPF_VPORT_FLAGS_NBITS: Must be last
+ */
+enum idpf_vport_flags {
+	IDPF_VPORT_SW_MARKER,
+	IDPF_VPORT_FLAGS_NBITS,
+};
+
+/**
  * struct idpf_vport - Handle for netdevices and queue resources
  * @num_txq: Number of allocated TX queues
  * @num_complq: Number of allocated completion queues
  * @txq_desc_count: TX queue descriptor count
  * @complq_desc_count: Completion queue descriptor count
+ * @compln_clean_budget: Work budget for completion clean
  * @num_txq_grp: Number of TX queue groups
  * @txq_grps: Array of TX queue groups
  * @txq_model: Split queue or single queue queuing model
@@ -280,6 +295,7 @@ extern const char * const idpf_vport_vc_state_str[];
  * @adapter: back pointer to associated adapter
  * @netdev: Associated net_device. Each vport should have one and only one
  *	    associated netdev.
+ * @flags: See enum idpf_vport_flags
  * @vport_type: Default SRIOV, SIOV, etc.
  * @vport_id: Device given vport identifier
  * @idx: Software index in adapter vports struct
@@ -290,10 +306,12 @@ extern const char * const idpf_vport_vc_state_str[];
  * @q_vector_idxs: Starting index of queue vectors
  * @max_mtu: device given max possible MTU
  * @default_mac_addr: device will give a default MAC to use
+ * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
  * @link_up: True if link is up
  * @vc_msg: Virtchnl message buffer
  * @vc_state: Virtchnl message state
  * @vchnl_wq: Wait queue for virtchnl messages
+ * @sw_marker_wq: workqueue for marker packets
  * @vc_buf_lock: Lock to protect virtchnl buffer
  */
 struct idpf_vport {
@@ -301,6 +319,7 @@ struct idpf_vport {
 	u16 num_complq;
 	u32 txq_desc_count;
 	u32 complq_desc_count;
+	u32 compln_clean_budget;
 	u16 num_txq_grp;
 	struct idpf_txq_group *txq_grps;
 	u32 txq_model;
@@ -319,6 +338,7 @@ struct idpf_vport {
 
 	struct idpf_adapter *adapter;
 	struct net_device *netdev;
+	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
 	u16 vport_type;
 	u32 vport_id;
 	u16 idx;
@@ -330,6 +350,7 @@ struct idpf_vport {
 	u16 *q_vector_idxs;
 	u16 max_mtu;
 	u8 default_mac_addr[ETH_ALEN];
+	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
 
 	bool link_up;
 
@@ -337,6 +358,7 @@ struct idpf_vport {
 	DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
 
 	wait_queue_head_t vchnl_wq;
+	wait_queue_head_t sw_marker_wq;
 	struct mutex vc_buf_lock;
 };
```
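
As a rough illustration of how the new sw_marker_wq and flags fields might be
used together, a hypothetical kernel-style helper (the function name and the
wake-up protocol are assumptions, not part of this patch): the disable flow
would sleep on sw_marker_wq until the poll loop has processed a marker
completion for every TX queue and cleared IDPF_VPORT_SW_MARKER.

```c
#include <linux/bitops.h>
#include <linux/wait.h>

/* Hypothetical helper, not from this patch: block until marker processing
 * is done. The completion poll loop is assumed to clear IDPF_VPORT_SW_MARKER
 * and wake sw_marker_wq once the last TX queue reports its marker completion.
 */
static void idpf_wait_for_sw_marker(struct idpf_vport *vport)
{
	wait_event(vport->sw_marker_wq,
		   !test_bit(IDPF_VPORT_SW_MARKER, vport->flags));
}
```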