author     Florian Westphal <fw@strlen.de>          2016-11-22 14:44:19 +0100
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2016-12-06 21:42:19 +0100
commit     ae0ac0ed6fcf5af3be0f63eb935f483f44a402d2 (patch)
tree       bdc9adb5d4744f958630b4e56defc818c905dbe3 /net/netfilter
parent     f28e15bacedd444608e25421c72eb2cf4527c9ca (diff)
netfilter: x_tables: pack percpu counter allocations
Instead of allocating each xt_counter individually, allocate 4k chunks
and then use these to satisfy counter allocation requests.

This should speed up rule evaluation by increasing data locality, and it
also speeds up ruleset loading because we reduce the number of calls to
the percpu allocator.

As Eric points out, we can't use PAGE_SIZE as the chunk size: the percpu
allocator would fail on arches with a 64k page size.
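
To make the scheme concrete, here is a minimal user-space analog of the
packing idea. Everything in it is hypothetical: aligned_alloc() stands in
for __alloc_percpu(), and BLOCK_SIZE, counter_alloc() and counter_free()
merely mirror the bump-allocation and block-ownership logic of the patch.
It is a sketch of the technique, not the kernel code:

	/* User-space analog of the packing scheme (hypothetical names;
	 * aligned_alloc() stands in for __alloc_percpu()). */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define BLOCK_SIZE 4096

	struct counter { uint64_t packets, bytes; };

	struct alloc_state {
		void *mem;         /* current block; NULL means "get a new one" */
		unsigned int off;  /* next free offset inside the block */
	};

	static bool counter_alloc(struct alloc_state *s, struct counter **res)
	{
		if (!s->mem) {
			/* The block is aligned to its own size, so only the
			 * allocation at offset 0 has all low bits clear. */
			s->mem = aligned_alloc(BLOCK_SIZE, BLOCK_SIZE);
			if (!s->mem)
				return false;
			memset(s->mem, 0, BLOCK_SIZE);
		}
		*res = (struct counter *)((char *)s->mem + s->off);
		s->off += sizeof(struct counter);
		if (s->off > BLOCK_SIZE - sizeof(struct counter)) {
			s->mem = NULL;	/* block full, next call grabs a new one */
			s->off = 0;
		}
		return true;
	}

	static void counter_free(struct counter *c)
	{
		/* Free the block exactly once: via its offset-0 counter. */
		if (((uintptr_t)c & (BLOCK_SIZE - 1)) == 0)
			free(c);
	}

With 16-byte struct xt_counters, one 4k block serves 256 counter
allocations per allocator call; freeing relies on the fact that a
block-size-aligned allocation puts only the offset-0 counter on a block
boundary.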
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/x_tables.c | 33 ++++++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index be5e83047594..f6ce4a7036e6 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
 
 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+#define XT_PCPU_BLOCK_SIZE 4096
 
 struct compat_delta {
 	unsigned int offset; /* offset in kernel */
@@ -1618,6 +1619,7 @@ EXPORT_SYMBOL_GPL(xt_proto_fini);
 /**
  * xt_percpu_counter_alloc - allocate x_tables rule counter
  *
+ * @state: pointer to xt_percpu allocation state
  * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
  *
  * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
@@ -1626,21 +1628,34 @@ EXPORT_SYMBOL_GPL(xt_proto_fini);
  * Rule evaluation needs to use xt_get_this_cpu_counter() helper
  * to fetch the real percpu counter.
  *
+ * To speed up allocation and improve data locality, a 4kb block is
+ * allocated.
+ *
+ * xt_percpu_counter_alloc_state contains the base address of the
+ * allocated page and the current sub-offset.
+ *
  * returns false on error.
  */
-bool xt_percpu_counter_alloc(struct xt_counters *counter)
+bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
+			     struct xt_counters *counter)
 {
-	void __percpu *res;
+	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
 
 	if (nr_cpu_ids <= 1)
 		return true;
 
-	res = __alloc_percpu(sizeof(struct xt_counters),
-			     sizeof(struct xt_counters));
-	if (!res)
-		return false;
-
-	counter->pcnt = (__force unsigned long)res;
+	if (!state->mem) {
+		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
+					    XT_PCPU_BLOCK_SIZE);
+		if (!state->mem)
+			return false;
+	}
+	counter->pcnt = (__force unsigned long)(state->mem + state->off);
+	state->off += sizeof(*counter);
+	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
+		state->mem = NULL;
+		state->off = 0;
+	}
 	return true;
 }
 EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
@@ -1649,7 +1664,7 @@ void xt_percpu_counter_free(struct xt_counters *counters)
 {
 	unsigned long pcnt = counters->pcnt;
 
-	if (nr_cpu_ids > 1)
+	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
 		free_percpu((void __percpu *)pcnt);
 }
 EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
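
On the caller side, the new signature means one
xt_percpu_counter_alloc_state is zeroed once and then threaded through an
entire ruleset load, so consecutive rules share blocks. A hedged sketch of
that pattern follows; num_rules, rules[] and the cleanup label are
illustrative, not the actual ip_tables/ip6_tables/arp_tables call sites:

	struct xt_percpu_counter_alloc_state state = { 0 };
	unsigned int i;

	/* One shared state: consecutive rules receive consecutive
	 * 16-byte slots carved from the same percpu block. */
	for (i = 0; i < num_rules; i++) {
		if (!xt_percpu_counter_alloc(&state, &rules[i]->counters))
			goto cleanup;	/* undo counters allocated so far */
	}

Because __alloc_percpu() is called with XT_PCPU_BLOCK_SIZE as both size
and alignment, only the counter placed at offset 0 of a block satisfies
(pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0, which is how
xt_percpu_counter_free() hands each shared block to free_percpu() exactly
once.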