Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.h')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 109
1 file changed, 69 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index bf40608f9bac..dcbdfe649a09 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -154,7 +154,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params {
u64 cd_qw1;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
@@ -251,9 +251,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
-struct ice_ring {
+struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_ring *next; /* pointer to next ring in q_vector */
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -261,13 +261,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
- struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
/* CL2 - 2nd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
+ /* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u16 q_handle; /* Queue handle per TC */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -276,56 +276,79 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
+ u16 rx_offset;
+ u16 rx_buf_len;
/* stats structs */
+ struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
struct rcu_head rcu; /* to avoid race on free */
- DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ /* CL4 - 4th cacheline starts here */
struct bpf_prog *xdp_prog;
struct xsk_buff_pool *xsk_pool;
- u16 rx_offset;
- /* CL3 - 3rd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
- /* CLX - the below items are only accessed infrequently and should be
- * in their own cache line if possible
- */
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ dma_addr_t dma; /* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u64 cached_phctime;
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
u8 flags;
+} ____cacheline_internodealigned_in_smp;
+
+struct ice_tx_ring {
+ /* CL1 - 1st cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ u8 __iomem *tail;
+ struct ice_tx_buf *tx_buf;
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ /* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */
- unsigned int size; /* length of descriptor ring in bytes */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 count; /* Number of descriptors */
+ u16 q_index; /* Queue number of ring */
+ struct xsk_buff_pool *xsk_pool;
+
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ struct ice_txq_stats tx_stats;
+
+ /* CL3 - 3rd cacheline starts here */
+ struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ struct ice_ptp_tx *tx_tstamps;
u32 txq_teid; /* Added Tx queue TEID */
- u16 rx_buf_len;
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- struct ice_ptp_tx *tx_tstamps;
- u64 cached_phctime;
- u8 ptp_rx:1;
- u8 ptp_tx:1;
+ u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}
-static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
@@ -337,7 +360,10 @@ enum ice_container_type {
struct ice_ring_container {
/* head of linked-list of rings */
- struct ice_ring *ring;
+ union {
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ };
struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec
@@ -358,10 +384,13 @@ struct ice_coalesce_stored {
};
/* iterator for handling rings in ring container */
-#define ice_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+#define ice_for_each_rx_ring(pos, head) \
+ for (pos = (head).rx_ring; pos; pos = pos->next)
+
+#define ice_for_each_tx_ring(pos, head) \
+ for (pos = (head).tx_ring; pos; pos = pos->next)
-static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -374,21 +403,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-void ice_clean_tx_ring(struct ice_ring *tx_ring);
-void ice_clean_rx_ring(struct ice_ring *rx_ring);
-int ice_setup_tx_ring(struct ice_ring *tx_ring);
-int ice_setup_rx_ring(struct ice_ring *rx_ring);
-void ice_free_tx_ring(struct ice_ring *tx_ring);
-void ice_free_rx_ring(struct ice_ring *rx_ring);
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
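
The split replaces the shared ice_ring with dedicated Tx and Rx ring types. To keep a single head pointer per interrupt container, ice_ring_container wraps both types in an anonymous union, and callers walk the list with the matching type-specific iterator, ice_for_each_tx_ring() or ice_for_each_rx_ring(). The sketch below is a minimal userspace model of that pattern, not the driver code itself: the struct names, field subset, and macro names are simplified stand-ins for the real definitions in ice_txrx.h, reduced to what the iteration needs.

/* Simplified model of the ring-container split: one head pointer held in
 * an anonymous union, two distinct ring types, and per-type iterator
 * macros. Userspace sketch only; the real structs and the
 * ice_for_each_{tx,rx}_ring() macros live in
 * drivers/net/ethernet/intel/ice/ice_txrx.h.
 */
#include <stdio.h>

struct tx_ring {
	struct tx_ring *next;	/* singly linked list within a q_vector */
	unsigned int q_index;
};

struct rx_ring {
	struct rx_ring *next;
	unsigned int q_index;
};

struct ring_container {
	union {			/* holds either ring type, never both */
		struct rx_ring *rx_ring;
		struct tx_ring *tx_ring;
	};
};

/* Same shape as the kernel macros: walk the ->next chain from the head. */
#define for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
#define for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

int main(void)
{
	struct tx_ring t1 = { .next = NULL, .q_index = 1 };
	struct tx_ring t0 = { .next = &t1, .q_index = 0 };
	struct ring_container txc = { .tx_ring = &t0 };
	struct tx_ring *tx;

	for_each_tx_ring(tx, txc)
		printf("tx queue %u\n", tx->q_index);
	return 0;
}

The per-type macros are what make the union safe in practice: a container that was populated as a Tx list is only ever walked with the Tx iterator, so every dereference of the shared pointer slot happens at the correct type, without the runtime tag a shared ice_ring would otherwise need.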