author | Eric Dumazet <edumazet@google.com> | 2022-02-10 13:42:30 -0800
committer | David S. Miller <davem@davemloft.net> | 2022-02-11 11:44:27 +0000
commit | ba55ef81637c61e52772bcd4e5fba10ec7190d2a (patch)
tree | 4dc9a9ebe9ab9606211f6fd54e28cb5d5740a632 /net/ipv6/route.c
parent | e5f80fcf869a18cc750d6b350bbfac82df292e0b (diff)
ipv6: add (struct uncached_list)->quarantine list
This is an optimization to keep the per-cpu lists as short as possible:
Whenever rt6_uncached_list_flush_dev() changes one rt6_info
matching the disappearing device, it can transfer the object
to a quarantine list, waiting for a final rt6_uncached_list_del().
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
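To make the pattern described in the commit message concrete, here is a minimal userspace sketch of the head/quarantine idea. It is not the kernel code itself: the list helpers are simplified stand-ins for `<linux/list.h>`, and `struct entry`, `dev_id` and `flush_dev()` are illustrative names rather than kernel identifiers. A flush moves matching entries from the live `head` list to `quarantine`, and the final delete uses a `list_del_init()`-style removal that works regardless of which of the two lists the entry currently sits on.

```c
/*
 * Sketch only: simplified userspace stand-ins for the kernel's
 * <linux/list.h> intrusive list.  Names below (struct entry, dev_id,
 * flush_dev) are illustrative, not the kernel's identifiers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *head)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

/* Unlink and re-initialize, so the entry reads as "not on any list". */
static void list_del_init(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    INIT_LIST_HEAD(n);
}

static void list_move(struct list_head *n, struct list_head *head)
{
    list_del_init(n);
    list_add(n, head);
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* One bucket; the kernel keeps one of these per CPU. */
struct uncached_list {
    struct list_head head;        /* live uncached entries */
    struct list_head quarantine;  /* entries whose device went away */
};

struct entry {                    /* stand-in for struct rt6_info */
    int dev_id;                   /* stand-in for rt->dst.dev */
    struct list_head node;        /* stand-in for rt6i_uncached */
};

/* Analogue of rt6_uncached_list_flush_dev(): quarantine entries for dev_id. */
static void flush_dev(struct uncached_list *ul, int dev_id)
{
    struct list_head *pos = ul->head.next;

    while (pos != &ul->head) {
        struct entry *e = container_of(pos, struct entry, node);

        pos = pos->next;          /* grab next before a possible move */
        if (e->dev_id == dev_id)
            list_move(&e->node, &ul->quarantine);
    }
}

int main(void)
{
    struct uncached_list ul;
    struct entry a = { .dev_id = 1 }, b = { .dev_id = 2 };

    INIT_LIST_HEAD(&ul.head);
    INIT_LIST_HEAD(&ul.quarantine);
    INIT_LIST_HEAD(&a.node);
    INIT_LIST_HEAD(&b.node);
    list_add(&a.node, &ul.head);
    list_add(&b.node, &ul.head);

    flush_dev(&ul, 1);            /* device 1 disappears: 'a' is quarantined */
    printf("head still has entries: %d\n", !list_empty(&ul.head));           /* 1 ('b') */
    printf("quarantine has entries: %d\n", !list_empty(&ul.quarantine));     /* 1 ('a') */

    /* Final removal, as rt6_uncached_list_del() would do: works whether the
     * entry is still on 'head' or has already been moved to 'quarantine'. */
    list_del_init(&a.node);
    list_del_init(&b.node);
    printf("both lists empty: %d\n",
           list_empty(&ul.head) && list_empty(&ul.quarantine));              /* 1 */
    return 0;
}
```

In the patch itself, the `list_del_init()` in rt6_uncached_list_del() likewise removes the route from whichever of the two per-cpu lists it is on, which is what lets rt6_uncached_list_flush_dev() shrink the `head` lists without waiting for the final removal.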
Diffstat (limited to 'net/ipv6/route.c')
-rw-r--r-- | net/ipv6/route.c | 17 |
1 file changed, 14 insertions, 3 deletions
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b04aaf13e41e..46c7a1b36075 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -130,6 +130,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
+	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
@@ -151,7 +152,7 @@ void rt6_uncached_list_del(struct rt6_info *rt)
 		struct uncached_list *ul = rt->rt6i_uncached_list;
 
 		spin_lock_bh(&ul->lock);
-		list_del(&rt->rt6i_uncached);
+		list_del_init(&rt->rt6i_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
 }
@@ -162,16 +163,21 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
-		struct rt6_info *rt;
+		struct rt6_info *rt, *safe;
+
+		if (list_empty(&ul->head))
+			continue;
 
 		spin_lock_bh(&ul->lock);
-		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+		list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) {
 			struct inet6_dev *rt_idev = rt->rt6i_idev;
 			struct net_device *rt_dev = rt->dst.dev;
+			bool handled = false;
 
 			if (rt_idev->dev == dev) {
 				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
 				in6_dev_put(rt_idev);
+				handled = true;
 			}
 
 			if (rt_dev == dev) {
@@ -179,7 +185,11 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 			dev_replace_track(rt_dev, blackhole_netdev,
 					  &rt->dst.dev_tracker,
 					  GFP_ATOMIC);
+			handled = true;
 		}
+		if (handled)
+			list_move(&rt->rt6i_uncached,
+				  &ul->quarantine);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -6727,6 +6737,7 @@ int __init ip6_route_init(void)
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
+		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}
 