From eac87c413bf9794c14d488998a5265ea5b32f04e Mon Sep 17 00:00:00 2001
From: Eelco Chaudron
Date: Wed, 15 Jul 2020 14:09:28 +0200
Subject: net: openvswitch: reorder masks array based on usage

This patch reorders the masks array every 4 seconds based on the usage
count of each mask. This greatly reduces the number of masks hit per
packet, and hence improves overall performance, especially in the
OVS/OVN case for OpenShift.

Here are some results from the OVS/OVN OpenShift test, which uses
8 pods, each pod having 512 uperf connections, with each connection
sending a 64-byte request and receiving a 1024-byte response (TCP).
All uperf clients are on one worker node while all uperf servers are
on the other worker node.

Kernel without this patch     :  7.71 Gbps
Kernel with this patch applied: 14.52 Gbps

We also ran some tests to verify that the rebalance activity does not
lower the flow insertion rate; it does not.

Signed-off-by: Eelco Chaudron
Tested-by: Andrew Theurer
Reviewed-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 net/openvswitch/flow_table.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 8a5cea6ae111..1f664b050e3b 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -27,9 +27,17 @@ struct mask_cache_entry {
 	u32 mask_index;
 };
 
+struct mask_count {
+	int index;
+	u64 counter;
+};
+
 struct mask_array {
 	struct rcu_head rcu;
 	int count, max;
+	u64 __percpu *masks_usage_cntr;
+	u64 *masks_usage_zero_cntr;
+	struct u64_stats_sync syncp;
 	struct sw_flow_mask __rcu *masks[];
 };
 
@@ -86,4 +94,7 @@ bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		       bool full, const struct sw_flow_mask *mask);
+
+void ovs_flow_masks_rebalance(struct flow_table *table);
+
 #endif /* flow_table.h */
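
For illustration only: a hedged sketch of how the lookup fast path
might account a hit against the per-CPU counters added above. The
helper name mask_hit_account() is made up here; the real increment
lives in net/openvswitch/flow_table.c, which this header-only diff
does not show. The u64_stats_update_begin()/_end() pair makes the
64-bit counter update tear-free on 32-bit kernels, and the rebalance
side would pair it with u64_stats_fetch_begin_irq()/_retry_irq().

static inline void mask_hit_account(struct mask_array *ma, u32 index)
{
	/* Each CPU owns its own slice of the usage counters, so the
	 * packet path never contends on a shared cache line. */
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);

	u64_stats_update_begin(&ma->syncp);
	usage_counters[index]++;
	u64_stats_update_end(&ma->syncp);
}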
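
And a self-contained userspace model of what a usage-based rebalance
pass can look like (plain C, compilable on its own). Everything below
is invented for illustration (NR_CPUS_MODEL, struct model_mask_array,
masks_rebalance_model()); the in-kernel ovs_flow_masks_rebalance()
additionally has to publish the reordered array via RCU.

#include <stdint.h>
#include <stdlib.h>

#define NR_CPUS_MODEL	4
#define MAX_MASKS	8

struct mask_count {
	int index;
	uint64_t counter;
};

struct model_mask_array {
	int count;			/* number of masks in use */
	int order[MAX_MASKS];		/* stands in for the masks[] ptrs */
	uint64_t usage[NR_CPUS_MODEL][MAX_MASKS]; /* per-CPU hit counts */
	uint64_t zero[MAX_MASKS];	/* snapshot at last rebalance */
};

/* Sort most-used masks first (descending by counter). */
static int cmp_count(const void *a, const void *b)
{
	const struct mask_count *ma = a, *mb = b;

	if (ma->counter < mb->counter)
		return 1;
	if (ma->counter > mb->counter)
		return -1;
	return 0;
}

static void masks_rebalance_model(struct model_mask_array *ma)
{
	struct mask_count counts[MAX_MASKS];
	int s, cpu, id;

	/* Sum the per-CPU counters for each mask and subtract the
	 * snapshot taken at the previous rebalance, so the sort key
	 * reflects only the usage of the last interval. */
	for (s = 0; s < ma->count; s++) {
		id = ma->order[s];
		counts[s].index = id;
		counts[s].counter = 0;
		for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
			counts[s].counter += ma->usage[cpu][id];
		counts[s].counter -= ma->zero[id];
		ma->zero[id] += counts[s].counter;
	}

	qsort(counts, ma->count, sizeof(counts[0]), cmp_count);

	/* Most-used masks move to the front, so the per-packet linear
	 * walk over the masks terminates sooner on average. */
	for (s = 0; s < ma->count; s++)
		ma->order[s] = counts[s].index;
}

Run every 4 seconds (e.g. from a delayed work item, matching the
interval the commit message describes), this keeps hot masks at the
head of the array without taking any locks on the packet path.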