author    David S. Miller <davem@davemloft.net>   2020-05-07 18:11:07 -0700
committer David S. Miller <davem@davemloft.net>   2020-05-07 18:11:07 -0700
commit    738fea32af86f5d58f30dfca6645494070c976ef (patch)
tree      5d1540325ac32853f85625cdb0262934f8244dbc /net/core
parent    3a13f98b4c16fb3489bdfd7550fcaa333ee69850 (diff)
parent    ae46f184bc1fb15bf2de47114c29236e61ca4bbc (diff)
Merge branch 'bonding-report-transmit-status-to-callers'
Eric Dumazet says:
====================
bonding: report transmit status to callers
The first patches clean up netpoll and make sure it reports the tx status to its users.
The last patch changes bonding so that it no longer pretends packets were sent without error.
By providing a more accurate status, the TCP stack can avoid adding more
packets when the slave qdisc is already full.
This came up while testing the latest horizon feature in sch_fq with
very low pacing-rate flows, but it should benefit hosts under stress.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
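
The benefit described in the cover letter shows up wherever a driver used to swallow the transmit status. Below is a minimal sketch of the idea, using a hypothetical slave-transmit helper (the actual bonding changes live outside this net/core diffstat and are not reproduced here):

#include <linux/netdevice.h>

/*
 * Hypothetical helper, for illustration only.  Forwarding the real
 * transmit status (a NET_XMIT_* or NETDEV_TX_* value) instead of a
 * hard-coded NETDEV_TX_OK lets the qdisc/TCP layer notice that the
 * slave queue is full and back off, which is the behaviour this
 * series enables.
 */
static netdev_tx_t example_slave_queue_xmit(struct sk_buff *skb,
					    struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	/* dev_queue_xmit() already reports NET_XMIT_* codes; forward them. */
	return dev_queue_xmit(skb);
}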
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/netpoll.c   29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 15b366a1a958..093e90e52bc2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -305,20 +305,22 @@ static int netpoll_owner_active(struct net_device *dev)
 }
 
 /* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct net_device *dev;
 	unsigned long tries;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
 	lockdep_assert_irqs_disabled();
 
-	npinfo = rcu_dereference_bh(np->dev->npinfo);
+	dev = np->dev;
+	npinfo = rcu_dereference_bh(dev->npinfo);
+
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
@@ -357,8 +359,25 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+	unsigned long flags;
+	netdev_tx_t ret;
+
+	if (unlikely(!np)) {
+		dev_kfree_skb_irq(skb);
+		ret = NET_XMIT_DROP;
+	} else {
+		local_irq_save(flags);
+		ret = __netpoll_send_skb(np, skb);
+		local_irq_restore(flags);
+	}
+	return ret;
 }
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+EXPORT_SYMBOL(netpoll_send_skb);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
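
From the diff, the caller-visible contract changes in two ways: the exported entry point is now netpoll_send_skb(), which disables IRQs itself and tolerates a NULL np, and it returns a netdev_tx_t instead of void. A sketch of how a call site might be adapted, assuming a hypothetical wrapper that previously discarded the status:

#include <linux/netpoll.h>

/* Hypothetical call sites, shown only to illustrate the API change. */

/* Before: IRQ handling at the call site, status discarded. */
static netdev_tx_t old_style_xmit(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;

	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
	return NETDEV_TX_OK;	/* success was only assumed */
}

/* After: netpoll_send_skb() saves/restores IRQs, checks for a NULL np,
 * and reports NETDEV_TX_OK or NET_XMIT_DROP.
 */
static netdev_tx_t new_style_xmit(struct netpoll *np, struct sk_buff *skb)
{
	return netpoll_send_skb(np, skb);
}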