diff options
author | Haiyang Zhang <haiyangz@microsoft.com> | 2023-08-04 13:33:53 -0700 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2023-08-06 08:36:06 +0100 |
commit | b1d13f7a3b5396503e6869ed627bb4eeab9b524f (patch) | |
tree | 95e668bcb514876b288feaa03eecd498e062f10a /include | |
parent | 48ae409aaf1ac2dd6f7a3e643f296a99bf6d67bb (diff) |
net: mana: Add page pool for RX buffers
Add page pool for RX buffers for faster buffer cycle and reduce CPU
usage.
The standard page pool API is used.
In tests with iperf and 128 threads, this patch improved throughput
by 12-15% and decreased the IRQ-associated CPU's usage from 99-100% to
10-50%.
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r-- | include/net/mana/mana.h | 3 |
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 1ccdca03e166..879990101c9f 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -282,6 +282,7 @@ struct mana_recv_buf_oob {
 	struct gdma_wqe_request wqe_req;
 
 	void *buf_va;
+	bool from_pool; /* allocated from a page pool */
 
 	/* SGL of the buffer going to be sent has part of the work request. */
 	u32 num_sge;
@@ -332,6 +333,8 @@ struct mana_rxq {
 	bool xdp_flush;
 	int xdp_rc; /* XDP redirect return code */
 
+	struct page_pool *page_pool;
+
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
 	 */