author     David S. Miller <davem@davemloft.net>    2017-01-05 11:49:57 -0500
committer  David S. Miller <davem@davemloft.net>    2017-01-05 11:49:57 -0500
commit     d896b3120b3391a2f95b2b8ec636e3f594d7f9c4
tree       65bb28c929691170f7d7d42adfd47d84eefd0ce4  /net/netfilter
parent     9b60047a9c950e3fde186466774ffd1ab1104d4e
parent     14221cc45caad2fcab3a8543234bb7eda9b540d5
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:
====================
Netfilter fixes for net
The following patchset contains accumulated Netfilter fixes for your
net tree:
1) Ensure quota dump and reset happen iff we can deliver numbers to
userspace (a small user-space model of the new dump/reset logic follows
the diff below).
2) Silence splat on incorrect use of smp_processor_id() from nft_queue.
3) Fix an out-of-bounds access reported by KASAN in
nf_tables_rule_destroy(), patch from Florian Westphal (the
evaluation-order issue is sketched right after this message).
4) Fix layer 4 checksum mangling in the nf_tables payload expression
with IPv6 (an incremental-checksum sketch precedes the nft_payload
hunk below).
5) Fix a race in the CLUSTERIP target from the control plane path when
two threads race to add a new configuration object. Serialize
invocations of clusterip_config_init() using spin_lock. From Xin Long.
6) Call br_nf_pre_routing_finish_bridge_finish() once we are done with
the br_nf_pre_routing_finish() hook. From Artur Molchanov.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
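
Fix 3 above reorders the conditions of the loop in nf_tables_rule_destroy()
(first hunk below) so the end-of-rule bound is tested before the element is
dereferenced. The following is a minimal, self-contained sketch of why that
ordering matters under C's short-circuit evaluation; the array walk and the
names in it are invented for illustration and are not the kernel's data
structures.

#include <stdio.h>

struct expr {
	const char *ops;	/* NULL marks an uninitialized slot */
};

/* Walk [first, last): 'last' points one past the final valid element. */
static void destroy_exprs(struct expr *first, struct expr *last)
{
	struct expr *e = first;

	/* Testing the bound first means e->ops is never read at 'last',
	 * which may lie outside the allocation.  Writing the conditions
	 * the other way around ("e->ops && e != last") reads e->ops one
	 * element past the end before the bound check can stop the loop.
	 */
	while (e != last && e->ops) {
		printf("destroy %s\n", e->ops);
		e++;
	}
}

int main(void)
{
	struct expr rule[3] = { { "payload" }, { "cmp" }, { "queue" } };

	/* All slots are initialized, so the loop must stop because it
	 * reaches the end pointer, never by reading past it.
	 */
	destroy_exprs(rule, rule + 3);
	return 0;
}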
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/nf_tables_api.c |  2
-rw-r--r--  net/netfilter/nft_payload.c   | 27
-rw-r--r--  net/netfilter/nft_queue.c     |  2
-rw-r--r--  net/netfilter/nft_quota.c     | 26
4 files changed, 35 insertions, 22 deletions
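
The nft_payload hunk below moves the inet checksum write-back into a helper
so that the pseudoheader-only case (csum_flags set without
NFT_PAYLOAD_CSUM_INET) also enters the branch. As background, here is a
hedged user-space model of the incremental one's-complement update
(RFC 1624) that this kind of checksum fix-up relies on; the function names
are local to the example, and this is not the kernel's csum_replace or
nft_csum_replace implementation.

#include <stdint.h>
#include <stdio.h>

/* Fold 32-bit carries back into 16 bits (end-around carry). */
static uint16_t fold_carries(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum of 16-bit words (even-length buffer assumed). */
static uint16_t ones_sum(const uint16_t *words, int n)
{
	uint32_t sum = 0;

	for (int i = 0; i < n; i++)
		sum += words[i];
	return fold_carries(sum);
}

/* Internet checksum: complement of the folded one's-complement sum. */
static uint16_t inet_checksum(const uint16_t *words, int n)
{
	return (uint16_t)~ones_sum(words, n);
}

/* RFC 1624 incremental update: HC' = ~(~HC + ~m + m'). */
static uint16_t inet_checksum_update(uint16_t old_csum, uint16_t old_word,
				     uint16_t new_word)
{
	uint32_t sum = (uint16_t)~old_csum;

	sum += (uint16_t)~old_word;
	sum += new_word;
	return (uint16_t)~fold_carries(sum);
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0xabcd, 0x0001, 0xf00d };
	uint16_t before = inet_checksum(pkt, 4);

	pkt[2] = 0x0042;	/* mangle one payload word */

	uint16_t incremental = inet_checksum_update(before, 0x0001, 0x0042);
	uint16_t recomputed = inet_checksum(pkt, 4);

	/* Both methods must agree on the new checksum. */
	printf("incremental=0x%04x recomputed=0x%04x\n", incremental, recomputed);
	return incremental == recomputed ? 0 : 1;
}

Both approaches must agree; the incremental form is used in practice because
it avoids re-summing the whole packet when only a few bytes change.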
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a019a87e58ee..0db5f9782265 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 * is called on error from nf_tables_newrule().
 	 */
 	expr = nft_expr_first(rule);
-	while (expr->ops && expr != nft_expr_last(rule)) {
+	while (expr != nft_expr_last(rule) && expr->ops) {
 		nf_tables_expr_destroy(ctx, expr);
 		expr = nft_expr_next(expr);
 	}
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 36d2b1096546..7d699bbd45b0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
 	return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+				 __wsum fsum, __wsum tsum, int csum_offset)
+{
+	__sum16 sum;
+
+	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	nft_csum_replace(&sum, fsum, tsum);
+	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
 				 struct nft_regs *regs,
 				 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	const u32 *src = &regs->data[priv->sreg];
 	int offset, csum_offset;
 	__wsum fsum, tsum;
-	__sum16 sum;
 
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	csum_offset = offset + priv->csum_offset;
 	offset += priv->offset;
 
-	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
 	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
 	     skb->ip_summed != CHECKSUM_PARTIAL)) {
-		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-			goto err;
-
 		fsum = skb_checksum(skb, offset, priv->len, 0);
 		tsum = csum_partial(src, priv->len, 0);
-		nft_csum_replace(&sum, fsum, tsum);
 
-		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
 			goto err;
 
 		if (priv->csum_flags &&
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 3e19fa1230dc..dbb6aaff67ec 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
 	if (priv->queues_total > 1) {
 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-			int cpu = smp_processor_id();
+			int cpu = raw_smp_processor_id();
 
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index bd6efc53f26d..2d6fe3559912 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
 			     bool reset)
 {
+	u64 consumed, consumed_cap;
 	u32 flags = priv->flags;
-	u64 consumed;
-
-	if (reset) {
-		consumed = atomic64_xchg(&priv->consumed, 0);
-		if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-			flags |= NFT_QUOTA_F_DEPLETED;
-	} else {
-		consumed = atomic64_read(&priv->consumed);
-	}
 
 	/* Since we inconditionally increment consumed quota for each packet
 	 * that we see, don't go over the quota boundary in what we send to
 	 * userspace.
 	 */
-	if (consumed > priv->quota)
-		consumed = priv->quota;
+	consumed = atomic64_read(&priv->consumed);
+	if (consumed >= priv->quota) {
+		consumed_cap = priv->quota;
+		flags |= NFT_QUOTA_F_DEPLETED;
+	} else {
+		consumed_cap = consumed;
+	}
 
 	if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
 			 NFTA_QUOTA_PAD) ||
-	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
 			 NFTA_QUOTA_PAD) ||
 	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
 		goto nla_put_failure;
+
+	if (reset) {
+		atomic64_sub(consumed, &priv->consumed);
+		clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+	}
 	return 0;
 
 nla_put_failure:
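
The nft_quota change above decouples the dump from the reset: the consumed
counter is read once, the value reported to userspace is capped at the quota
(with the depleted flag derived from the same snapshot), and the reset runs
only after the netlink attributes have been written successfully, subtracting
the reported snapshot instead of zeroing the counter so bytes accounted by
packets racing with the dump are not lost. Below is a minimal user-space
model of that idea using C11 atomics rather than the kernel's atomic64 API;
the struct and function names are invented for the example, and the kernel's
persistent NFT_QUOTA_DEPLETED_BIT bookkeeping is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct quota {
	uint64_t limit;			/* configured quota in bytes */
	_Atomic uint64_t consumed;	/* unconditionally bumped per packet */
};

/* Report the current state; if 'reset' is set, subtract only the snapshot
 * we just reported, so bytes accounted by concurrent packets between the
 * read and the reset are preserved.
 */
static void quota_dump(struct quota *q, bool reset)
{
	uint64_t consumed = atomic_load(&q->consumed);
	uint64_t reported = consumed;
	bool depleted = false;

	/* The counter is bumped even past the limit, so cap what we report
	 * and derive the depleted flag from the same snapshot.
	 */
	if (consumed >= q->limit) {
		reported = q->limit;
		depleted = true;
	}

	printf("quota=%llu consumed=%llu depleted=%d\n",
	       (unsigned long long)q->limit,
	       (unsigned long long)reported, depleted);

	if (reset)
		atomic_fetch_sub(&q->consumed, consumed);
}

int main(void)
{
	struct quota q = { .limit = 1000 };

	atomic_store(&q.consumed, 1200);	/* overshoot past the limit */
	quota_dump(&q, true);			/* reports 1000, resets by 1200 */
	quota_dump(&q, false);			/* now reports 0 */
	return 0;
}

Running the model prints the capped value once and zero after the reset,
mirroring how the new dump path avoids over-reporting while still resetting
by exactly what was delivered.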