author     Daniel Borkmann <dborkman@redhat.com>       2014-08-27 11:11:27 +0200
committer  David S. Miller <davem@davemloft.net>       2014-08-29 20:02:07 -0700
commit     10c51b56232d24f150e39884a9e749fd99cbc60c (patch)
tree       4baa9297a591c45bbb1a148bfc3c9c1822f6ce3a /net
parent     a3bf5c429eb5f5ec4d364d51dfa8855efcc005f8 (diff)
net: add skb_get_tx_queue() helper
Replace occurrences of skb_get_queue_mapping() and the follow-up netdev_get_tx_queue() call with an actual helper function.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
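The helper itself is added to include/linux/netdevice.h, which lies outside the 'net' subtree shown in this view; a minimal sketch of what it wraps, inferred from the call sites converted below:

static inline
struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
				       const struct sk_buff *skb)
{
	/* Look up the TX queue that the skb's queue mapping points at,
	 * combining the two calls each caller previously made by hand. */
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}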
Diffstat (limited to 'net')
-rw-r--r--  net/core/netpoll.c        2
-rw-r--r--  net/core/pktgen.c         4
-rw-r--r--  net/packet/af_packet.c    4
-rw-r--r--  net/sched/sch_generic.c   6
4 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a5ad06828d67..12b1df976562 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -115,7 +115,7 @@ static void queue_process(struct work_struct *work)
continue;
}
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(dev, skb);
local_irq_save(flags);
HARD_TX_LOCK(dev, txq, smp_processor_id());
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 83e2b4b19eb7..d81b540096c3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3286,7 +3286,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
- u16 queue_map;
int ret;
/* If device is offline, then don't send */
@@ -3324,8 +3323,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (pkt_dev->delay && pkt_dev->last_ok)
spin(pkt_dev, pkt_dev->next_tx);
- queue_map = skb_get_queue_mapping(pkt_dev->skb);
- txq = netdev_get_tx_queue(odev, queue_map);
+ txq = skb_get_tx_queue(odev, pkt_dev->skb);
local_bh_disable();
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0dfa990d4eaa..b7a7f5a721bd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -243,7 +243,6 @@ static int packet_direct_xmit(struct sk_buff *skb)
netdev_features_t features;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
- u16 queue_map;
if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
@@ -254,8 +253,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
__skb_linearize(skb))
goto drop;
- queue_map = skb_get_queue_mapping(skb);
- txq = netdev_get_tx_queue(dev, queue_map);
+ txq = skb_get_tx_queue(dev, skb);
local_bh_disable();
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fc04fe93c2da..05b3f5d104af 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -63,7 +63,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
if (unlikely(skb)) {
/* check the reason of requeuing without tx lock first */
- txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
q->q.qlen--;
@@ -183,10 +183,12 @@ static inline int qdisc_restart(struct Qdisc *q)
skb = dequeue_skb(q);
if (unlikely(!skb))
return 0;
+
WARN_ON_ONCE(skb_dst_is_noref(skb));
+
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(dev, skb);
return sch_direct_xmit(skb, q, dev, txq, root_lock);
}