author     Eric Dumazet <edumazet@google.com>      2020-05-02 19:54:19 -0700
committer  David S. Miller <davem@davemloft.net>   2020-05-03 15:50:31 -0700
commit     7ba0537c2b534149be288f851900b4cf5aacde48
tree       4f8a6cce1cb4f0236b5d7dd15e17948f5abffb1d  /net/sched/sch_fq.c
parent     dde0a648fc00e2156a3358600c5fbfb3f53256ac
net_sched: sch_fq: change fq_flow size/layout
sizeof(struct fq_flow) is 112 bytes on 64bit arches.
With 64-byte cache lines, flows packed back to back therefore straddle
line boundaries unevenly: half of them span two cache lines, while the
other half span three.
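
For intuition, here is a minimal userspace sketch (not part of the
patch) of that straddling arithmetic, assuming 64-byte cache lines and
112-byte flows packed back to back:

	#include <stdio.h>

	#define CACHE_LINE 64
	#define FLOW_SIZE  112	/* sizeof(struct fq_flow) before this patch */

	int main(void)
	{
		/* Four consecutive flows cover exactly 448 bytes (7 cache
		 * lines), after which the pattern repeats. */
		for (unsigned int i = 0; i < 4; i++) {
			unsigned int first = (i * FLOW_SIZE) / CACHE_LINE;
			unsigned int last  = (i * FLOW_SIZE + FLOW_SIZE - 1) / CACHE_LINE;

			printf("flow %u spans %u cache lines\n",
			       i, last - first + 1);
		}
		return 0;	/* prints 2, 3, 3, 2: half two lines, half three */
	}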
This patch adds cache line alignment, and makes sure that only the
first cache line is touched by fq_enqueue(), which is more expensive
than fq_dequeue() in general.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
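
To see the enqueue/dequeue split concretely, here is a hedged
userspace mock of the new layout (field names mirror the patch, but
the rb_root/rb_node stand-ins and the plain alignment attribute used
to emulate ____cacheline_aligned_in_smp are assumptions for a 64bit
build):

	#include <stdio.h>
	#include <stddef.h>

	struct rb_root { void *rb_node; };                        /* 8 bytes  */
	struct rb_node { unsigned long pc; void *right, *left; }; /* 24 bytes */

	struct fq_flow_mock {
	/* first cache line: fq_gc(), fq_enqueue(), fq_dequeue() */
		struct rb_root t_root;           /* offset 0 */
		void *head;
		void *tail;                      /* stands in for the tail/age union */
		struct rb_node fq_node;
		void *sk;
		unsigned int socket_hash;
		int qlen;                        /* bytes 60..63 close line one */
	/* second cache line: fq_dequeue() only */
		int credit;                      /* offset 64 opens line two */
		/* 4-byte hole (the "32bit hole on 64bit arches") */
		struct fq_flow_mock *next;
		struct rb_node rate_node;
		unsigned long long time_next_packet;
	} __attribute__((aligned(64)));

	int main(void)
	{
		printf("credit at offset %zu, sizeof %zu\n",
		       offsetof(struct fq_flow_mock, credit),
		       sizeof(struct fq_flow_mock));	/* 64 and 128 */
		return 0;
	}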
Diffstat (limited to 'net/sched/sch_fq.c')
 -rw-r--r--  net/sched/sch_fq.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index bc9ca1ba507b..ced1f987d7e4 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -66,6 +66,7 @@ static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
  * in linear list (head,tail), otherwise are placed in a rbtree (t_root).
  */
 struct fq_flow {
+/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
 	struct rb_root	t_root;
 	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
 	union {
@@ -74,14 +75,18 @@ struct fq_flow {
 	};
 	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
 	struct sock	*sk;
+	u32		socket_hash;	/* sk_hash */
 	int		qlen;		/* number of packets in flow queue */
+
+/* Second cache line, used in fq_dequeue() */
 	int		credit;
-	u32		socket_hash;	/* sk_hash */
+	/* 32bit hole on 64bit arches */
+
 	struct fq_flow *next;		/* next pointer in RR lists */
 
 	struct rb_node  rate_node;	/* anchor in q->delayed tree */
 	u64		time_next_packet;
-};
+} ____cacheline_aligned_in_smp;
 
 struct fq_flow_head {
 	struct fq_flow *first;
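
On a built tree, the resulting placement can be double-checked with
pahole, which annotates struct members with cache line boundaries and
holes; something along the lines of 'pahole -C fq_flow
net/sched/sch_fq.o' (shown as an illustration, not part of the commit)
should report qlen ending the first 64-byte line and credit starting
the second.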