author:    Dragos Tatulea <dtatulea@nvidia.com>  2024-06-04 00:22:12 +0300
committer: Jakub Kicinski <kuba@kernel.org>      2024-06-05 20:20:46 -0700
commit:    d34d7d1973c4d1125c430a9612d97bfc10cf6382 (patch)
tree:      90a473ad8fb9ae5589a55ce5309f1630d28c07fb
parent:    e839ac9a89cb3bf1aa1652676fa3d6c79810e55d (diff)
net/mlx5e: SHAMPO, Specialize mlx5e_fill_skb_data()
mlx5e_fill_skb_data() used to have multiple callers. But after the XDP
multibuf refactoring from commit 2cb0e27d43b4 ("net/mlx5e: RX, Prepare
non-linear striding RQ for XDP multi-buffer support"), the SHAMPO code
path is the only caller.
Take advantage of this and specialize the function:
- Drop the redundant MLX5E_RQ_STATE_SHAMPO check used to select truesize.
- Assume that data_bcnt is > 0. This is needed in a downstream patch.
Rename the function to mlx5e_shampo_fill_skb_data() as well to make the new scope clear.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Suggested-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240603212219.1037656-8-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 25
1 file changed, 11 insertions, 14 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bb59ee0b1567..1e3a5b2afeae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1948,21 +1948,16 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
 #endif
 
 static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
-		    struct mlx5e_frag_page *frag_page,
-		    u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+			   struct mlx5e_frag_page *frag_page,
+			   u32 data_bcnt, u32 data_offset)
 {
 	net_prefetchw(skb->data);
 
-	while (data_bcnt) {
+	do {
 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
-		unsigned int truesize;
-
-		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
-			truesize = pg_consumed_bytes;
-		else
-			truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+		unsigned int truesize = pg_consumed_bytes;
 
 		frag_page->frags++;
 		mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
@@ -1971,7 +1966,7 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
 		data_bcnt -= pg_consumed_bytes;
 		data_offset = 0;
 		frag_page++;
-	}
+	} while (data_bcnt);
 }
 
 static struct sk_buff *
@@ -2330,10 +2325,12 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	}
 
 	if (likely(head_size)) {
-		struct mlx5e_frag_page *frag_page;
+		if (data_bcnt) {
+			struct mlx5e_frag_page *frag_page;
 
-		frag_page = &wi->alloc_units.frag_pages[page_idx];
-		mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+			frag_page = &wi->alloc_units.frag_pages[page_idx];
+			mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+		}
 	}
 
 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
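For illustration only, below is a minimal standalone C sketch of the control-flow change: the caller now guards against a zero byte count, which is what lets the fill loop become a do/while that always consumes at least one fragment. The names here (model_fill_skb_data, MODEL_PAGE_SIZE, min_u32) are hypothetical stand-ins and do not come from the mlx5e driver.

/*
 * Standalone model of the specialized fill loop above. Illustrative only:
 * MODEL_PAGE_SIZE, model_fill_skb_data() and min_u32() are hypothetical
 * names, not mlx5e driver code.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Consume data_bcnt bytes page by page, as the do/while in the patch does. */
static void model_fill_skb_data(unsigned int data_bcnt, unsigned int data_offset)
{
	do {
		unsigned int consumed = min_u32(MODEL_PAGE_SIZE - data_offset, data_bcnt);

		printf("add frag: offset=%u len=%u\n", data_offset, consumed);

		data_bcnt -= consumed;
		data_offset = 0;	/* every page after the first starts at offset 0 */
	} while (data_bcnt);
}

int main(void)
{
	unsigned int data_bcnt = 6000, data_offset = 1000;

	/* Caller-side guard mirroring the new "if (data_bcnt)" in the CQE handler. */
	if (data_bcnt)
		model_fill_skb_data(data_bcnt, data_offset);

	return 0;
}

Running the sketch with data_bcnt = 6000 and data_offset = 1000 prints two fragments (3096 bytes, then 2904 bytes), mirroring how the real loop walks consecutive pages and resets the offset after the first one.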