author    Jesper Dangaard Brouer <brouer@redhat.com>    2023-01-13 14:52:04 +0100
committer Paolo Abeni <pabeni@redhat.com>               2023-01-17 10:27:29 +0100
commit    eedade12f4cb7284555c4c0314485e9575c70ab7 (patch)
tree      15cc4283ae175c98748a18395edd6f06455ba8e5 /net/core/skbuff.c
parent    a4650da2a2d6150a8ff1ea36fde9f6a26cf5fda3 (diff)
net: kfree_skb_list use kmem_cache_free_bulk
The kfree_skb_list function walks the SKB list (via skb->next) and frees each
SKB individually to the SLUB/SLAB allocator (kmem_cache). It is more efficient
to bulk free them via the kmem_cache_free_bulk API.

This patch creates a stack-local array of SKBs to bulk free while walking the
list. The bulk array size is limited to 16 SKBs to trade off stack usage
against efficiency.

The SLUB kmem_cache "skbuff_head_cache" uses an object size of 256 bytes,
usually in an order-1 page of 8192 bytes, which gives 32 objects per slab
(this can vary across architectures and due to SLUB sharing). Thus, for SLUB
the optimal bulk free case is 32 objects belonging to the same slab, but at
runtime this is unlikely to occur.

The expected gain from using the kmem_cache bulk alloc and free APIs has been
assessed via a microbenchmark kernel module[1]. The module 'slab_bulk_test01'
results at bulk 16 elements:

  kmem-in-loop Per elem: 109 cycles(tsc) 30.532 ns (step:16)
  kmem-bulk    Per elem:  64 cycles(tsc) 17.905 ns (step:16)

A more detailed description of the benchmarks is available in [2].

[1] https://github.com/netoptimizer/prototype-kernel/tree/master/kernel/mm
[2] https://github.com/xdp-project/xdp-project/blob/master/areas/mem/kfree_skb_list01.org

V2: rename function to kfree_skb_add_bulk.

Reviewed-by: Saeed Mahameed <saeed@kernel.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
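Side note (not from the patch or the benchmark module): the gain comes from
amortizing per-call allocator overhead over many objects. Below is a minimal
sketch of the two free patterns the 'slab_bulk_test01' numbers compare; the
BULK constant and the demo_* function names are illustrative placeholders,
not code from the benchmark:

#include <linux/slab.h>

#define BULK 16 /* matches the step:16 runs above; placeholder value */

/* Free one object per allocator call (the "kmem-in-loop" pattern). */
static void demo_free_in_loop(struct kmem_cache *cache, void **objs)
{
        int i;

        for (i = 0; i < BULK; i++)
                kmem_cache_free(cache, objs[i]);
}

/* Hand the whole array back in a single call (the "kmem-bulk" pattern),
 * letting the allocator batch its internal work.
 */
static void demo_free_bulk(struct kmem_cache *cache, void **objs)
{
        kmem_cache_free_bulk(cache, BULK, objs);
}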
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--   net/core/skbuff.c | 40 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ca2b4d6f9c9a..4e73ab3482b8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -962,16 +962,54 @@ kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
 }
 EXPORT_SYMBOL(kfree_skb_reason);

+#define KFREE_SKB_BULK_SIZE     16
+
+struct skb_free_array {
+        unsigned int skb_count;
+        void *skb_array[KFREE_SKB_BULK_SIZE];
+};
+
+static void kfree_skb_add_bulk(struct sk_buff *skb,
+                               struct skb_free_array *sa,
+                               enum skb_drop_reason reason)
+{
+        /* if SKB is a clone, don't handle this case */
+        if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
+                __kfree_skb(skb);
+                return;
+        }
+
+        skb_release_all(skb, reason);
+        sa->skb_array[sa->skb_count++] = skb;
+
+        if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
+                kmem_cache_free_bulk(skbuff_head_cache, KFREE_SKB_BULK_SIZE,
+                                     sa->skb_array);
+                sa->skb_count = 0;
+        }
+}
+
 void __fix_address
 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
 {
+        struct skb_free_array sa;
+
+        sa.skb_count = 0;
+
         while (segs) {
                 struct sk_buff *next = segs->next;

+                skb_mark_not_on_list(segs);
+
                 if (__kfree_skb_reason(segs, reason))
-                        __kfree_skb(segs);
+                        kfree_skb_add_bulk(segs, &sa, reason);
+
                 segs = next;
         }
+
+        if (sa.skb_count)
+                kmem_cache_free_bulk(skbuff_head_cache, sa.skb_count,
+                                     sa.skb_array);
 }
 EXPORT_SYMBOL(kfree_skb_list_reason);
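For context (not part of the patch), a hedged sketch of how a chain of SKBs
typically reaches this code: callers link SKBs through skb->next and hand the
head to kfree_skb_list(), the existing wrapper around kfree_skb_list_reason().
The chain construction below is purely illustrative:

#include <linux/skbuff.h>

/* Illustrative only: build a short skb->next chain and free it in one call. */
static void demo_free_skb_chain(void)
{
        struct sk_buff *head = NULL, *skb;
        int i;

        for (i = 0; i < 4; i++) {
                skb = alloc_skb(128, GFP_KERNEL);
                if (!skb)
                        break;
                skb->next = head;       /* the link kfree_skb_list_reason() walks */
                head = skb;
        }

        /* Frees every SKB on the chain; with this patch, non-cloned skb heads
         * are returned to skbuff_head_cache via kmem_cache_free_bulk().
         */
        kfree_skb_list(head);
}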