author	Eric Dumazet <edumazet@google.com>	2023-09-22 22:03:56 +0000
committer	Paolo Abeni <pabeni@redhat.com>	2023-10-03 10:05:22 +0200
commit	6532e257aa73645e28dee5b2232cc3c88be62083 (patch)
tree	45163de0778a35d7a377062daa245e50c54464f5 /net/ipv4
parent	a135798e6e200ecb2f864cecca6d257ba278370c (diff)
tcp_metrics: optimize tcp_metrics_flush_all()
This is inspired by several syzbot reports where tcp_metrics_flush_all()
was seen in the traces.

We can avoid acquiring tcp_metrics_lock for empty buckets,
and we should add one cond_resched() to break potential long loops.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
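For readers outside the kernel tree, a minimal userspace C sketch of the same pattern follows. The hash table, flush_all(), its key matching, and the pthread mutex are hypothetical stand-ins and not part of the patch; a plain pointer read stands in for rcu_access_pointer(), and sched_yield() stands in for cond_resched().

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
};

struct bucket {
	struct entry *chain;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove every entry whose key matches, walking the table bucket by bucket. */
static void flush_all(struct bucket *tbl, unsigned int nr_buckets, int key)
{
	unsigned int row;

	for (row = 0; row < nr_buckets; row++) {
		struct bucket *hb = &tbl[row];
		struct entry **pp = &hb->chain;

		/*
		 * Cheap peek before locking: empty buckets are skipped with
		 * no lock traffic at all.  The kernel patch does this with
		 * rcu_access_pointer(); a plain read stands in here.
		 */
		if (!*pp)
			continue;

		pthread_mutex_lock(&table_lock);
		while (*pp) {
			struct entry *e = *pp;

			if (e->key == key) {	/* match: unlink and free */
				*pp = e->next;
				free(e);
			} else {
				pp = &e->next;
			}
		}
		pthread_mutex_unlock(&table_lock);

		/* Yield between buckets, the userspace stand-in for cond_resched(). */
		sched_yield();
	}
}

The design point is the same in both places: the lockless peek makes sparsely populated tables nearly free to flush, and yielding once per bucket bounds how long the flusher can run without giving up the CPU.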
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_metrics.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7aca12c59c18..c2a925538542 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -898,11 +898,13 @@ static void tcp_metrics_flush_all(struct net *net)
 	unsigned int row;
 
 	for (row = 0; row < max_rows; row++, hb++) {
-		struct tcp_metrics_block __rcu **pp;
+		struct tcp_metrics_block __rcu **pp = &hb->chain;
 		bool match;
 
+		if (!rcu_access_pointer(*pp))
+			continue;
+
 		spin_lock_bh(&tcp_metrics_lock);
-		pp = &hb->chain;
 		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 			match = net ? net_eq(tm_net(tm), net) :
 				!refcount_read(&tm_net(tm)->ns.count);
@@ -914,6 +916,7 @@ static void tcp_metrics_flush_all(struct net *net)
 			}
 		}
 		spin_unlock_bh(&tcp_metrics_lock);
+		cond_resched();
 	}
 }
 